aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/net
diff options
context:
space:
mode:
Diffstat (limited to 'drivers/net')
-rw-r--r--drivers/net/3c509.c6
-rw-r--r--drivers/net/8139cp.c5
-rw-r--r--drivers/net/8139too.c5
-rw-r--r--drivers/net/Kconfig43
-rw-r--r--drivers/net/Makefile4
-rw-r--r--drivers/net/amd8111e.c23
-rw-r--r--drivers/net/arm/at91_ether.c6
-rw-r--r--drivers/net/atl1e/atl1e.h1
-rw-r--r--drivers/net/atl1e/atl1e_hw.c4
-rw-r--r--drivers/net/atlx/atl1.c24
-rw-r--r--drivers/net/atlx/atl1.h2
-rw-r--r--drivers/net/atlx/atl2.c8
-rw-r--r--drivers/net/ax88796.c6
-rw-r--r--drivers/net/bnx2.c50
-rw-r--r--drivers/net/bnx2.h6
-rw-r--r--drivers/net/bnx2x_init.h9
-rw-r--r--drivers/net/bnx2x_main.c10
-rw-r--r--drivers/net/bonding/bond_alb.c13
-rw-r--r--drivers/net/bonding/bond_main.c68
-rw-r--r--drivers/net/chelsio/sge.c4
-rw-r--r--drivers/net/cris/eth_v10.c4
-rw-r--r--drivers/net/cxgb3/cxgb3_main.c4
-rw-r--r--drivers/net/cxgb3/l2t.c1
-rw-r--r--drivers/net/cxgb3/t3_hw.c8
-rw-r--r--drivers/net/dm9000.c9
-rw-r--r--drivers/net/e100.c20
-rw-r--r--drivers/net/e1000/e1000_ethtool.c8
-rw-r--r--drivers/net/e1000/e1000_main.c1
-rw-r--r--drivers/net/e1000e/e1000.h5
-rw-r--r--drivers/net/e1000e/ethtool.c8
-rw-r--r--drivers/net/e1000e/ich8lan.c9
-rw-r--r--drivers/net/e1000e/netdev.c26
-rw-r--r--drivers/net/e1000e/param.c25
-rw-r--r--drivers/net/ehea/ehea.h2
-rw-r--r--drivers/net/ehea/ehea_main.c25
-rw-r--r--drivers/net/ehea/ehea_qmr.c176
-rw-r--r--drivers/net/ehea/ehea_qmr.h5
-rw-r--r--drivers/net/enc28j60.c18
-rw-r--r--drivers/net/fec_mpc52xx.c18
-rw-r--r--drivers/net/fec_mpc52xx_phy.c55
-rw-r--r--drivers/net/fs_enet/fs_enet-main.c6
-rw-r--r--drivers/net/gianfar.c55
-rw-r--r--drivers/net/gianfar_mii.c21
-rw-r--r--drivers/net/gianfar_mii.h3
-rw-r--r--drivers/net/hp-plus.c2
-rw-r--r--drivers/net/ibm_newemac/core.c10
-rw-r--r--drivers/net/ibm_newemac/mal.c15
-rw-r--r--drivers/net/igb/igb_ethtool.c8
-rw-r--r--drivers/net/igb/igb_main.c64
-rw-r--r--drivers/net/ipg.c8
-rw-r--r--drivers/net/irda/ks959-sir.c1
-rw-r--r--drivers/net/irda/ksdazzle-sir.c1
-rw-r--r--drivers/net/ixgbe/ixgbe_main.c68
-rw-r--r--drivers/net/jme.c21
-rw-r--r--drivers/net/jme.h2
-rw-r--r--drivers/net/loopback.c9
-rw-r--r--drivers/net/macvlan.c3
-rw-r--r--drivers/net/mlx4/Makefile7
-rw-r--r--drivers/net/mlx4/alloc.c97
-rw-r--r--drivers/net/mlx4/cq.c2
-rw-r--r--drivers/net/mlx4/en_cq.c146
-rw-r--r--drivers/net/mlx4/en_main.c253
-rw-r--r--drivers/net/mlx4/en_netdev.c1088
-rw-r--r--drivers/net/mlx4/en_params.c482
-rw-r--r--drivers/net/mlx4/en_port.c261
-rw-r--r--drivers/net/mlx4/en_port.h570
-rw-r--r--drivers/net/mlx4/en_resources.c96
-rw-r--r--drivers/net/mlx4/en_rx.c1080
-rw-r--r--drivers/net/mlx4/en_tx.c820
-rw-r--r--drivers/net/mlx4/eq.c2
-rw-r--r--drivers/net/mlx4/fw.c20
-rw-r--r--drivers/net/mlx4/fw.h7
-rw-r--r--drivers/net/mlx4/main.c295
-rw-r--r--drivers/net/mlx4/mcg.c4
-rw-r--r--drivers/net/mlx4/mlx4.h55
-rw-r--r--drivers/net/mlx4/mlx4_en.h561
-rw-r--r--drivers/net/mlx4/mr.c2
-rw-r--r--drivers/net/mlx4/pd.c4
-rw-r--r--drivers/net/mlx4/port.c319
-rw-r--r--drivers/net/mlx4/qp.c81
-rw-r--r--drivers/net/mlx4/srq.c2
-rw-r--r--drivers/net/mv643xx_eth.c14
-rw-r--r--drivers/net/myri10ge/myri10ge.c10
-rw-r--r--drivers/net/netx-eth.c2
-rw-r--r--drivers/net/niu.c299
-rw-r--r--drivers/net/niu.h13
-rw-r--r--drivers/net/pcmcia/axnet_cs.c2
-rw-r--r--drivers/net/pcmcia/ibmtr_cs.c2
-rw-r--r--drivers/net/pcmcia/pcnet_cs.c1
-rw-r--r--drivers/net/phy/marvell.c66
-rw-r--r--drivers/net/phy/mdio_bus.c7
-rw-r--r--drivers/net/phy/phy_device.c47
-rw-r--r--drivers/net/phy/vitesse.c64
-rw-r--r--drivers/net/pppoe.c6
-rw-r--r--drivers/net/pppol2tp.c1
-rw-r--r--drivers/net/qla3xxx.c19
-rw-r--r--drivers/net/qlge/qlge.h5
-rw-r--r--drivers/net/qlge/qlge_main.c89
-rw-r--r--drivers/net/r8169.c74
-rw-r--r--drivers/net/sfc/ethtool.c4
-rw-r--r--drivers/net/sh_eth.c4
-rw-r--r--drivers/net/sis190.c1
-rw-r--r--drivers/net/sis900.c1
-rw-r--r--drivers/net/smc911x.c56
-rw-r--r--drivers/net/smc911x.h6
-rw-r--r--drivers/net/smc91x.c14
-rw-r--r--drivers/net/spider_net.c4
-rw-r--r--drivers/net/starfire.c5
-rw-r--r--drivers/net/sungem.c144
-rw-r--r--drivers/net/tlan.c23
-rw-r--r--drivers/net/tulip/dmfe.c12
-rw-r--r--drivers/net/tun.c2
-rw-r--r--drivers/net/ucc_geth_ethtool.c7
-rw-r--r--drivers/net/usb/asix.c8
-rw-r--r--drivers/net/usb/dm9601.c15
-rw-r--r--drivers/net/usb/hso.c12
-rw-r--r--drivers/net/via-velocity.c13
-rw-r--r--drivers/net/wan/syncppp.c5
-rw-r--r--drivers/net/wan/z85230.c1
-rw-r--r--drivers/net/wireless/ath5k/base.c95
-rw-r--r--drivers/net/wireless/ath5k/base.h4
-rw-r--r--drivers/net/wireless/ath5k/debug.c12
-rw-r--r--drivers/net/wireless/ath5k/desc.c16
-rw-r--r--drivers/net/wireless/ath5k/initvals.c2
-rw-r--r--drivers/net/wireless/ath5k/reset.c22
-rw-r--r--drivers/net/wireless/ath9k/beacon.c10
-rw-r--r--drivers/net/wireless/ath9k/recv.c19
-rw-r--r--drivers/net/wireless/hostap/hostap_wlan.h5
-rw-r--r--drivers/net/wireless/ipw2200.c4
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn.c19
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-core.c3
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-dev.h3
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-rx.c26
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-scan.c7
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-sta.c24
-rw-r--r--drivers/net/wireless/iwlwifi/iwl3945-base.c14
-rw-r--r--drivers/net/wireless/libertas/cmd.c4
-rw-r--r--drivers/net/wireless/libertas/rx.c2
-rw-r--r--drivers/net/wireless/libertas/scan.c4
-rw-r--r--drivers/net/wireless/libertas_tf/if_usb.c2
-rw-r--r--drivers/net/wireless/orinoco.c42
-rw-r--r--drivers/net/wireless/p54/p54common.c31
-rw-r--r--drivers/net/wireless/p54/p54pci.c132
-rw-r--r--drivers/net/wireless/rt2x00/Kconfig2
-rw-r--r--drivers/net/wireless/rtl8187_dev.c3
-rw-r--r--drivers/net/wireless/zd1211rw/zd_mac.c2
-rw-r--r--drivers/net/wireless/zd1211rw/zd_usb.c2
-rw-r--r--drivers/net/xen-netfront.c6
-rw-r--r--drivers/net/xtsonic.c319
149 files changed, 8277 insertions, 892 deletions
diff --git a/drivers/net/3c509.c b/drivers/net/3c509.c
index 3a7bc524af33..c7a4f3bcc2bc 100644
--- a/drivers/net/3c509.c
+++ b/drivers/net/3c509.c
@@ -94,7 +94,7 @@
94#include <asm/io.h> 94#include <asm/io.h>
95#include <asm/irq.h> 95#include <asm/irq.h>
96 96
97static char version[] __initdata = DRV_NAME ".c:" DRV_VERSION " " DRV_RELDATE " becker@scyld.com\n"; 97static char version[] __devinitdata = DRV_NAME ".c:" DRV_VERSION " " DRV_RELDATE " becker@scyld.com\n";
98 98
99#ifdef EL3_DEBUG 99#ifdef EL3_DEBUG
100static int el3_debug = EL3_DEBUG; 100static int el3_debug = EL3_DEBUG;
@@ -186,7 +186,7 @@ static int max_interrupt_work = 10;
186static int nopnp; 186static int nopnp;
187#endif 187#endif
188 188
189static int __init el3_common_init(struct net_device *dev); 189static int __devinit el3_common_init(struct net_device *dev);
190static void el3_common_remove(struct net_device *dev); 190static void el3_common_remove(struct net_device *dev);
191static ushort id_read_eeprom(int index); 191static ushort id_read_eeprom(int index);
192static ushort read_eeprom(int ioaddr, int index); 192static ushort read_eeprom(int ioaddr, int index);
@@ -537,7 +537,7 @@ static struct mca_driver el3_mca_driver = {
537static int mca_registered; 537static int mca_registered;
538#endif /* CONFIG_MCA */ 538#endif /* CONFIG_MCA */
539 539
540static int __init el3_common_init(struct net_device *dev) 540static int __devinit el3_common_init(struct net_device *dev)
541{ 541{
542 struct el3_private *lp = netdev_priv(dev); 542 struct el3_private *lp = netdev_priv(dev);
543 int err; 543 int err;
diff --git a/drivers/net/8139cp.c b/drivers/net/8139cp.c
index 85fa40a0a667..9ba1f0b46429 100644
--- a/drivers/net/8139cp.c
+++ b/drivers/net/8139cp.c
@@ -1836,10 +1836,9 @@ static int cp_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
1836 1836
1837 if (pdev->vendor == PCI_VENDOR_ID_REALTEK && 1837 if (pdev->vendor == PCI_VENDOR_ID_REALTEK &&
1838 pdev->device == PCI_DEVICE_ID_REALTEK_8139 && pdev->revision < 0x20) { 1838 pdev->device == PCI_DEVICE_ID_REALTEK_8139 && pdev->revision < 0x20) {
1839 dev_err(&pdev->dev, 1839 dev_info(&pdev->dev,
1840 "This (id %04x:%04x rev %02x) is not an 8139C+ compatible chip\n", 1840 "This (id %04x:%04x rev %02x) is not an 8139C+ compatible chip, use 8139too\n",
1841 pdev->vendor, pdev->device, pdev->revision); 1841 pdev->vendor, pdev->device, pdev->revision);
1842 dev_err(&pdev->dev, "Try the \"8139too\" driver instead.\n");
1843 return -ENODEV; 1842 return -ENODEV;
1844 } 1843 }
1845 1844
diff --git a/drivers/net/8139too.c b/drivers/net/8139too.c
index 0daf8c15e381..63f906b04899 100644
--- a/drivers/net/8139too.c
+++ b/drivers/net/8139too.c
@@ -946,10 +946,9 @@ static int __devinit rtl8139_init_one (struct pci_dev *pdev,
946 if (pdev->vendor == PCI_VENDOR_ID_REALTEK && 946 if (pdev->vendor == PCI_VENDOR_ID_REALTEK &&
947 pdev->device == PCI_DEVICE_ID_REALTEK_8139 && pdev->revision >= 0x20) { 947 pdev->device == PCI_DEVICE_ID_REALTEK_8139 && pdev->revision >= 0x20) {
948 dev_info(&pdev->dev, 948 dev_info(&pdev->dev,
949 "This (id %04x:%04x rev %02x) is an enhanced 8139C+ chip\n", 949 "This (id %04x:%04x rev %02x) is an enhanced 8139C+ chip, use 8139cp\n",
950 pdev->vendor, pdev->device, pdev->revision); 950 pdev->vendor, pdev->device, pdev->revision);
951 dev_info(&pdev->dev, 951 return -ENODEV;
952 "Use the \"8139cp\" driver for improved performance and stability.\n");
953 } 952 }
954 953
955 if (pdev->vendor == PCI_VENDOR_ID_REALTEK && 954 if (pdev->vendor == PCI_VENDOR_ID_REALTEK &&
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index ad301ace6085..231eeaf1d552 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -464,6 +464,12 @@ config MIPS_JAZZ_SONIC
464 This is the driver for the onboard card of MIPS Magnum 4000, 464 This is the driver for the onboard card of MIPS Magnum 4000,
465 Acer PICA, Olivetti M700-10 and a few other identical OEM systems. 465 Acer PICA, Olivetti M700-10 and a few other identical OEM systems.
466 466
467config XTENSA_XT2000_SONIC
468 tristate "Xtensa XT2000 onboard SONIC Ethernet support"
469 depends on XTENSA_PLATFORM_XT2000
470 help
471 This is the driver for the onboard card of the Xtensa XT2000 board.
472
467config MIPS_AU1X00_ENET 473config MIPS_AU1X00_ENET
468 bool "MIPS AU1000 Ethernet support" 474 bool "MIPS AU1000 Ethernet support"
469 depends on SOC_AU1X00 475 depends on SOC_AU1X00
@@ -888,7 +894,7 @@ config SMC91X
888 select CRC32 894 select CRC32
889 select MII 895 select MII
890 depends on ARM || REDWOOD_5 || REDWOOD_6 || M32R || SUPERH || \ 896 depends on ARM || REDWOOD_5 || REDWOOD_6 || M32R || SUPERH || \
891 SOC_AU1X00 || BLACKFIN || MN10300 897 MIPS || BLACKFIN || MN10300
892 help 898 help
893 This is a driver for SMC's 91x series of Ethernet chipsets, 899 This is a driver for SMC's 91x series of Ethernet chipsets,
894 including the SMC91C94 and the SMC91C111. Say Y if you want it 900 including the SMC91C94 and the SMC91C111. Say Y if you want it
@@ -960,7 +966,7 @@ config SMC911X
960 tristate "SMSC LAN911[5678] support" 966 tristate "SMSC LAN911[5678] support"
961 select CRC32 967 select CRC32
962 select MII 968 select MII
963 depends on ARCH_PXA || SUPERH 969 depends on ARM || SUPERH
964 help 970 help
965 This is a driver for SMSC's LAN911x series of Ethernet chipsets 971 This is a driver for SMSC's LAN911x series of Ethernet chipsets
966 including the new LAN9115, LAN9116, LAN9117, and LAN9118. 972 including the new LAN9115, LAN9116, LAN9117, and LAN9118.
@@ -1819,9 +1825,10 @@ config FEC2
1819 1825
1820config FEC_MPC52xx 1826config FEC_MPC52xx
1821 tristate "MPC52xx FEC driver" 1827 tristate "MPC52xx FEC driver"
1822 depends on PPC_MPC52xx && PPC_BESTCOMM_FEC 1828 depends on PPC_MPC52xx && PPC_BESTCOMM
1823 select CRC32 1829 select CRC32
1824 select PHYLIB 1830 select PHYLIB
1831 select PPC_BESTCOMM_FEC
1825 ---help--- 1832 ---help---
1826 This option enables support for the MPC5200's on-chip 1833 This option enables support for the MPC5200's on-chip
1827 Fast Ethernet Controller 1834 Fast Ethernet Controller
@@ -2003,6 +2010,15 @@ config IGB_LRO
2003 2010
2004 If in doubt, say N. 2011 If in doubt, say N.
2005 2012
2013config IGB_DCA
2014 bool "Direct Cache Access (DCA) Support"
2015 default y
2016 depends on IGB && DCA && !(IGB=y && DCA=m)
2017 ---help---
2018 Say Y here if you want to use Direct Cache Access (DCA) in the
2019 driver. DCA is a method for warming the CPU cache before data
2020 is used, with the intent of lessening the impact of cache misses.
2021
2006source "drivers/net/ixp2000/Kconfig" 2022source "drivers/net/ixp2000/Kconfig"
2007 2023
2008config MYRI_SBUS 2024config MYRI_SBUS
@@ -2426,9 +2442,13 @@ config IXGBE
2426 will be called ixgbe. 2442 will be called ixgbe.
2427 2443
2428config IXGBE_DCA 2444config IXGBE_DCA
2429 bool 2445 bool "Direct Cache Access (DCA) Support"
2430 default y 2446 default y
2431 depends on IXGBE && DCA && !(IXGBE=y && DCA=m) 2447 depends on IXGBE && DCA && !(IXGBE=y && DCA=m)
2448 ---help---
2449 Say Y here if you want to use Direct Cache Access (DCA) in the
2450 driver. DCA is a method for warming the CPU cache before data
2451 is used, with the intent of lessening the impact of cache misses.
2432 2452
2433config IXGB 2453config IXGB
2434 tristate "Intel(R) PRO/10GbE support" 2454 tristate "Intel(R) PRO/10GbE support"
@@ -2478,9 +2498,13 @@ config MYRI10GE
2478 will be called myri10ge. 2498 will be called myri10ge.
2479 2499
2480config MYRI10GE_DCA 2500config MYRI10GE_DCA
2481 bool 2501 bool "Direct Cache Access (DCA) Support"
2482 default y 2502 default y
2483 depends on MYRI10GE && DCA && !(MYRI10GE=y && DCA=m) 2503 depends on MYRI10GE && DCA && !(MYRI10GE=y && DCA=m)
2504 ---help---
2505 Say Y here if you want to use Direct Cache Access (DCA) in the
2506 driver. DCA is a method for warming the CPU cache before data
2507 is used, with the intent of lessening the impact of cache misses.
2484 2508
2485config NETXEN_NIC 2509config NETXEN_NIC
2486 tristate "NetXen Multi port (1/10) Gigabit Ethernet NIC" 2510 tristate "NetXen Multi port (1/10) Gigabit Ethernet NIC"
@@ -2504,6 +2528,15 @@ config PASEMI_MAC
2504 This driver supports the on-chip 1/10Gbit Ethernet controller on 2528 This driver supports the on-chip 1/10Gbit Ethernet controller on
2505 PA Semi's PWRficient line of chips. 2529 PA Semi's PWRficient line of chips.
2506 2530
2531config MLX4_EN
2532 tristate "Mellanox Technologies 10Gbit Ethernet support"
2533 depends on PCI && INET
2534 select MLX4_CORE
2535 select INET_LRO
2536 help
2537 This driver supports Mellanox Technologies ConnectX Ethernet
2538 devices.
2539
2507config MLX4_CORE 2540config MLX4_CORE
2508 tristate 2541 tristate
2509 depends on PCI 2542 depends on PCI
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index fa2510b2e609..017383ad5ec6 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -114,7 +114,7 @@ obj-$(CONFIG_EL2) += 3c503.o 8390p.o
114obj-$(CONFIG_NE2000) += ne.o 8390p.o 114obj-$(CONFIG_NE2000) += ne.o 8390p.o
115obj-$(CONFIG_NE2_MCA) += ne2.o 8390p.o 115obj-$(CONFIG_NE2_MCA) += ne2.o 8390p.o
116obj-$(CONFIG_HPLAN) += hp.o 8390p.o 116obj-$(CONFIG_HPLAN) += hp.o 8390p.o
117obj-$(CONFIG_HPLAN_PLUS) += hp-plus.o 8390.o 117obj-$(CONFIG_HPLAN_PLUS) += hp-plus.o 8390p.o
118obj-$(CONFIG_ULTRA) += smc-ultra.o 8390.o 118obj-$(CONFIG_ULTRA) += smc-ultra.o 8390.o
119obj-$(CONFIG_ULTRAMCA) += smc-mca.o 8390.o 119obj-$(CONFIG_ULTRAMCA) += smc-mca.o 8390.o
120obj-$(CONFIG_ULTRA32) += smc-ultra32.o 8390.o 120obj-$(CONFIG_ULTRA32) += smc-ultra32.o 8390.o
@@ -227,6 +227,8 @@ pasemi_mac_driver-objs := pasemi_mac.o pasemi_mac_ethtool.o
227obj-$(CONFIG_MLX4_CORE) += mlx4/ 227obj-$(CONFIG_MLX4_CORE) += mlx4/
228obj-$(CONFIG_ENC28J60) += enc28j60.o 228obj-$(CONFIG_ENC28J60) += enc28j60.o
229 229
230obj-$(CONFIG_XTENSA_XT2000_SONIC) += xtsonic.o
231
230obj-$(CONFIG_MACB) += macb.o 232obj-$(CONFIG_MACB) += macb.o
231 233
232obj-$(CONFIG_ARM) += arm/ 234obj-$(CONFIG_ARM) += arm/
diff --git a/drivers/net/amd8111e.c b/drivers/net/amd8111e.c
index c54967f7942a..07a6697e3635 100644
--- a/drivers/net/amd8111e.c
+++ b/drivers/net/amd8111e.c
@@ -644,10 +644,6 @@ This function frees the transmiter and receiver descriptor rings.
644*/ 644*/
645static void amd8111e_free_ring(struct amd8111e_priv* lp) 645static void amd8111e_free_ring(struct amd8111e_priv* lp)
646{ 646{
647
648 /* Free transmit and receive skbs */
649 amd8111e_free_skbs(lp->amd8111e_net_dev);
650
651 /* Free transmit and receive descriptor rings */ 647 /* Free transmit and receive descriptor rings */
652 if(lp->rx_ring){ 648 if(lp->rx_ring){
653 pci_free_consistent(lp->pci_dev, 649 pci_free_consistent(lp->pci_dev,
@@ -833,12 +829,14 @@ static int amd8111e_rx_poll(struct napi_struct *napi, int budget)
833 829
834 } while(intr0 & RINT0); 830 } while(intr0 & RINT0);
835 831
836 /* Receive descriptor is empty now */ 832 if (rx_pkt_limit > 0) {
837 spin_lock_irqsave(&lp->lock, flags); 833 /* Receive descriptor is empty now */
838 __netif_rx_complete(dev, napi); 834 spin_lock_irqsave(&lp->lock, flags);
839 writel(VAL0|RINTEN0, mmio + INTEN0); 835 __netif_rx_complete(dev, napi);
840 writel(VAL2 | RDMD0, mmio + CMD0); 836 writel(VAL0|RINTEN0, mmio + INTEN0);
841 spin_unlock_irqrestore(&lp->lock, flags); 837 writel(VAL2 | RDMD0, mmio + CMD0);
838 spin_unlock_irqrestore(&lp->lock, flags);
839 }
842 840
843rx_not_empty: 841rx_not_empty:
844 return num_rx_pkt; 842 return num_rx_pkt;
@@ -1231,7 +1229,9 @@ static int amd8111e_close(struct net_device * dev)
1231 1229
1232 amd8111e_disable_interrupt(lp); 1230 amd8111e_disable_interrupt(lp);
1233 amd8111e_stop_chip(lp); 1231 amd8111e_stop_chip(lp);
1234 amd8111e_free_ring(lp); 1232
1233 /* Free transmit and receive skbs */
1234 amd8111e_free_skbs(lp->amd8111e_net_dev);
1235 1235
1236 netif_carrier_off(lp->amd8111e_net_dev); 1236 netif_carrier_off(lp->amd8111e_net_dev);
1237 1237
@@ -1241,6 +1241,7 @@ static int amd8111e_close(struct net_device * dev)
1241 1241
1242 spin_unlock_irq(&lp->lock); 1242 spin_unlock_irq(&lp->lock);
1243 free_irq(dev->irq, dev); 1243 free_irq(dev->irq, dev);
1244 amd8111e_free_ring(lp);
1244 1245
1245 /* Update the statistics before closing */ 1246 /* Update the statistics before closing */
1246 amd8111e_get_stats(dev); 1247 amd8111e_get_stats(dev);
diff --git a/drivers/net/arm/at91_ether.c b/drivers/net/arm/at91_ether.c
index 0fa53464efb2..6f431a887e7e 100644
--- a/drivers/net/arm/at91_ether.c
+++ b/drivers/net/arm/at91_ether.c
@@ -1080,7 +1080,8 @@ static int __init at91ether_setup(unsigned long phy_type, unsigned short phy_add
1080 init_timer(&lp->check_timer); 1080 init_timer(&lp->check_timer);
1081 lp->check_timer.data = (unsigned long)dev; 1081 lp->check_timer.data = (unsigned long)dev;
1082 lp->check_timer.function = at91ether_check_link; 1082 lp->check_timer.function = at91ether_check_link;
1083 } 1083 } else if (lp->board_data.phy_irq_pin >= 32)
1084 gpio_request(lp->board_data.phy_irq_pin, "ethernet_phy");
1084 1085
1085 /* Display ethernet banner */ 1086 /* Display ethernet banner */
1086 printk(KERN_INFO "%s: AT91 ethernet at 0x%08x int=%d %s%s (%s)\n", 1087 printk(KERN_INFO "%s: AT91 ethernet at 0x%08x int=%d %s%s (%s)\n",
@@ -1167,6 +1168,9 @@ static int __devexit at91ether_remove(struct platform_device *pdev)
1167 struct net_device *dev = platform_get_drvdata(pdev); 1168 struct net_device *dev = platform_get_drvdata(pdev);
1168 struct at91_private *lp = netdev_priv(dev); 1169 struct at91_private *lp = netdev_priv(dev);
1169 1170
1171 if (lp->board_data.phy_irq_pin >= 32)
1172 gpio_free(lp->board_data.phy_irq_pin);
1173
1170 unregister_netdev(dev); 1174 unregister_netdev(dev);
1171 free_irq(dev->irq, dev); 1175 free_irq(dev->irq, dev);
1172 dma_free_coherent(NULL, sizeof(struct recv_desc_bufs), lp->dlist, (dma_addr_t)lp->dlist_phys); 1176 dma_free_coherent(NULL, sizeof(struct recv_desc_bufs), lp->dlist, (dma_addr_t)lp->dlist_phys);
diff --git a/drivers/net/atl1e/atl1e.h b/drivers/net/atl1e/atl1e.h
index b645fa0f3f64..c49550d507a0 100644
--- a/drivers/net/atl1e/atl1e.h
+++ b/drivers/net/atl1e/atl1e.h
@@ -46,7 +46,6 @@
46#include <linux/vmalloc.h> 46#include <linux/vmalloc.h>
47#include <linux/pagemap.h> 47#include <linux/pagemap.h>
48#include <linux/tcp.h> 48#include <linux/tcp.h>
49#include <linux/mii.h>
50#include <linux/ethtool.h> 49#include <linux/ethtool.h>
51#include <linux/if_vlan.h> 50#include <linux/if_vlan.h>
52#include <linux/workqueue.h> 51#include <linux/workqueue.h>
diff --git a/drivers/net/atl1e/atl1e_hw.c b/drivers/net/atl1e/atl1e_hw.c
index 8cbc1b59bd62..4a7700620119 100644
--- a/drivers/net/atl1e/atl1e_hw.c
+++ b/drivers/net/atl1e/atl1e_hw.c
@@ -163,9 +163,6 @@ int atl1e_read_mac_addr(struct atl1e_hw *hw)
163 * atl1e_hash_mc_addr 163 * atl1e_hash_mc_addr
164 * purpose 164 * purpose
165 * set hash value for a multicast address 165 * set hash value for a multicast address
166 * hash calcu processing :
167 * 1. calcu 32bit CRC for multicast address
168 * 2. reverse crc with MSB to LSB
169 */ 166 */
170u32 atl1e_hash_mc_addr(struct atl1e_hw *hw, u8 *mc_addr) 167u32 atl1e_hash_mc_addr(struct atl1e_hw *hw, u8 *mc_addr)
171{ 168{
@@ -174,7 +171,6 @@ u32 atl1e_hash_mc_addr(struct atl1e_hw *hw, u8 *mc_addr)
174 int i; 171 int i;
175 172
176 crc32 = ether_crc_le(6, mc_addr); 173 crc32 = ether_crc_le(6, mc_addr);
177 crc32 = ~crc32;
178 for (i = 0; i < 32; i++) 174 for (i = 0; i < 32; i++)
179 value |= (((crc32 >> i) & 1) << (31 - i)); 175 value |= (((crc32 >> i) & 1) << (31 - i));
180 176
diff --git a/drivers/net/atlx/atl1.c b/drivers/net/atlx/atl1.c
index 3cf59a7f5a1c..aef403d299ee 100644
--- a/drivers/net/atlx/atl1.c
+++ b/drivers/net/atlx/atl1.c
@@ -2310,7 +2310,8 @@ static void atl1_tx_queue(struct atl1_adapter *adapter, u16 count,
2310 if (tpd != ptpd) 2310 if (tpd != ptpd)
2311 memcpy(tpd, ptpd, sizeof(struct tx_packet_desc)); 2311 memcpy(tpd, ptpd, sizeof(struct tx_packet_desc));
2312 tpd->buffer_addr = cpu_to_le64(buffer_info->dma); 2312 tpd->buffer_addr = cpu_to_le64(buffer_info->dma);
2313 tpd->word2 = (cpu_to_le16(buffer_info->length) & 2313 tpd->word2 &= ~(TPD_BUFLEN_MASK << TPD_BUFLEN_SHIFT);
2314 tpd->word2 |= (cpu_to_le16(buffer_info->length) &
2314 TPD_BUFLEN_MASK) << TPD_BUFLEN_SHIFT; 2315 TPD_BUFLEN_MASK) << TPD_BUFLEN_SHIFT;
2315 2316
2316 /* 2317 /*
@@ -2409,8 +2410,8 @@ static int atl1_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
2409 vlan_tag = (vlan_tag << 4) | (vlan_tag >> 13) | 2410 vlan_tag = (vlan_tag << 4) | (vlan_tag >> 13) |
2410 ((vlan_tag >> 9) & 0x8); 2411 ((vlan_tag >> 9) & 0x8);
2411 ptpd->word3 |= 1 << TPD_INS_VL_TAG_SHIFT; 2412 ptpd->word3 |= 1 << TPD_INS_VL_TAG_SHIFT;
2412 ptpd->word3 |= (vlan_tag & TPD_VL_TAGGED_MASK) << 2413 ptpd->word2 |= (vlan_tag & TPD_VLANTAG_MASK) <<
2413 TPD_VL_TAGGED_SHIFT; 2414 TPD_VLANTAG_SHIFT;
2414 } 2415 }
2415 2416
2416 tso = atl1_tso(adapter, skb, ptpd); 2417 tso = atl1_tso(adapter, skb, ptpd);
@@ -3403,14 +3404,8 @@ static void atl1_get_wol(struct net_device *netdev,
3403{ 3404{
3404 struct atl1_adapter *adapter = netdev_priv(netdev); 3405 struct atl1_adapter *adapter = netdev_priv(netdev);
3405 3406
3406 wol->supported = WAKE_UCAST | WAKE_MCAST | WAKE_BCAST | WAKE_MAGIC; 3407 wol->supported = WAKE_MAGIC;
3407 wol->wolopts = 0; 3408 wol->wolopts = 0;
3408 if (adapter->wol & ATLX_WUFC_EX)
3409 wol->wolopts |= WAKE_UCAST;
3410 if (adapter->wol & ATLX_WUFC_MC)
3411 wol->wolopts |= WAKE_MCAST;
3412 if (adapter->wol & ATLX_WUFC_BC)
3413 wol->wolopts |= WAKE_BCAST;
3414 if (adapter->wol & ATLX_WUFC_MAG) 3409 if (adapter->wol & ATLX_WUFC_MAG)
3415 wol->wolopts |= WAKE_MAGIC; 3410 wol->wolopts |= WAKE_MAGIC;
3416 return; 3411 return;
@@ -3421,15 +3416,10 @@ static int atl1_set_wol(struct net_device *netdev,
3421{ 3416{
3422 struct atl1_adapter *adapter = netdev_priv(netdev); 3417 struct atl1_adapter *adapter = netdev_priv(netdev);
3423 3418
3424 if (wol->wolopts & (WAKE_PHY | WAKE_ARP | WAKE_MAGICSECURE)) 3419 if (wol->wolopts & (WAKE_PHY | WAKE_UCAST | WAKE_MCAST | WAKE_BCAST |
3420 WAKE_ARP | WAKE_MAGICSECURE))
3425 return -EOPNOTSUPP; 3421 return -EOPNOTSUPP;
3426 adapter->wol = 0; 3422 adapter->wol = 0;
3427 if (wol->wolopts & WAKE_UCAST)
3428 adapter->wol |= ATLX_WUFC_EX;
3429 if (wol->wolopts & WAKE_MCAST)
3430 adapter->wol |= ATLX_WUFC_MC;
3431 if (wol->wolopts & WAKE_BCAST)
3432 adapter->wol |= ATLX_WUFC_BC;
3433 if (wol->wolopts & WAKE_MAGIC) 3423 if (wol->wolopts & WAKE_MAGIC)
3434 adapter->wol |= ATLX_WUFC_MAG; 3424 adapter->wol |= ATLX_WUFC_MAG;
3435 return 0; 3425 return 0;
diff --git a/drivers/net/atlx/atl1.h b/drivers/net/atlx/atl1.h
index a5015b14a429..ffa73fc8d95e 100644
--- a/drivers/net/atlx/atl1.h
+++ b/drivers/net/atlx/atl1.h
@@ -504,7 +504,7 @@ struct rx_free_desc {
504#define TPD_PKTNT_MASK 0x0001 504#define TPD_PKTNT_MASK 0x0001
505#define TPD_PKTINT_SHIFT 15 505#define TPD_PKTINT_SHIFT 15
506#define TPD_VLANTAG_MASK 0xFFFF 506#define TPD_VLANTAG_MASK 0xFFFF
507#define TPD_VLAN_SHIFT 16 507#define TPD_VLANTAG_SHIFT 16
508 508
509/* tpd word 3 bits 0:13 */ 509/* tpd word 3 bits 0:13 */
510#define TPD_EOP_MASK 0x0001 510#define TPD_EOP_MASK 0x0001
diff --git a/drivers/net/atlx/atl2.c b/drivers/net/atlx/atl2.c
index f5bdc92c1a65..8571e8c0bc67 100644
--- a/drivers/net/atlx/atl2.c
+++ b/drivers/net/atlx/atl2.c
@@ -1690,9 +1690,11 @@ static int atl2_resume(struct pci_dev *pdev)
1690 1690
1691 ATL2_WRITE_REG(&adapter->hw, REG_WOL_CTRL, 0); 1691 ATL2_WRITE_REG(&adapter->hw, REG_WOL_CTRL, 0);
1692 1692
1693 err = atl2_request_irq(adapter); 1693 if (netif_running(netdev)) {
1694 if (netif_running(netdev) && err) 1694 err = atl2_request_irq(adapter);
1695 return err; 1695 if (err)
1696 return err;
1697 }
1696 1698
1697 atl2_reset_hw(&adapter->hw); 1699 atl2_reset_hw(&adapter->hw);
1698 1700
diff --git a/drivers/net/ax88796.c b/drivers/net/ax88796.c
index 4207d6efddc0..9a314d88e7b6 100644
--- a/drivers/net/ax88796.c
+++ b/drivers/net/ax88796.c
@@ -838,12 +838,12 @@ static int ax_probe(struct platform_device *pdev)
838 838
839 /* find the platform resources */ 839 /* find the platform resources */
840 840
841 dev->irq = platform_get_irq(pdev, 0); 841 ret = platform_get_irq(pdev, 0);
842 if (dev->irq < 0) { 842 if (ret < 0) {
843 dev_err(&pdev->dev, "no IRQ specified\n"); 843 dev_err(&pdev->dev, "no IRQ specified\n");
844 ret = -ENXIO;
845 goto exit_mem; 844 goto exit_mem;
846 } 845 }
846 dev->irq = ret;
847 847
848 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 848 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
849 if (res == NULL) { 849 if (res == NULL) {
diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c
index 430d430bce29..9e8222f9e90e 100644
--- a/drivers/net/bnx2.c
+++ b/drivers/net/bnx2.c
@@ -543,9 +543,9 @@ bnx2_free_rx_mem(struct bnx2 *bp)
543 for (j = 0; j < bp->rx_max_pg_ring; j++) { 543 for (j = 0; j < bp->rx_max_pg_ring; j++) {
544 if (rxr->rx_pg_desc_ring[j]) 544 if (rxr->rx_pg_desc_ring[j])
545 pci_free_consistent(bp->pdev, RXBD_RING_SIZE, 545 pci_free_consistent(bp->pdev, RXBD_RING_SIZE,
546 rxr->rx_pg_desc_ring[i], 546 rxr->rx_pg_desc_ring[j],
547 rxr->rx_pg_desc_mapping[i]); 547 rxr->rx_pg_desc_mapping[j]);
548 rxr->rx_pg_desc_ring[i] = NULL; 548 rxr->rx_pg_desc_ring[j] = NULL;
549 } 549 }
550 if (rxr->rx_pg_ring) 550 if (rxr->rx_pg_ring)
551 vfree(rxr->rx_pg_ring); 551 vfree(rxr->rx_pg_ring);
@@ -3144,6 +3144,28 @@ bnx2_has_work(struct bnx2_napi *bnapi)
3144 return 0; 3144 return 0;
3145} 3145}
3146 3146
3147static void
3148bnx2_chk_missed_msi(struct bnx2 *bp)
3149{
3150 struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
3151 u32 msi_ctrl;
3152
3153 if (bnx2_has_work(bnapi)) {
3154 msi_ctrl = REG_RD(bp, BNX2_PCICFG_MSI_CONTROL);
3155 if (!(msi_ctrl & BNX2_PCICFG_MSI_CONTROL_ENABLE))
3156 return;
3157
3158 if (bnapi->last_status_idx == bp->idle_chk_status_idx) {
3159 REG_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl &
3160 ~BNX2_PCICFG_MSI_CONTROL_ENABLE);
3161 REG_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl);
3162 bnx2_msi(bp->irq_tbl[0].vector, bnapi);
3163 }
3164 }
3165
3166 bp->idle_chk_status_idx = bnapi->last_status_idx;
3167}
3168
3147static void bnx2_poll_link(struct bnx2 *bp, struct bnx2_napi *bnapi) 3169static void bnx2_poll_link(struct bnx2 *bp, struct bnx2_napi *bnapi)
3148{ 3170{
3149 struct status_block *sblk = bnapi->status_blk.msi; 3171 struct status_block *sblk = bnapi->status_blk.msi;
@@ -3218,14 +3240,15 @@ static int bnx2_poll(struct napi_struct *napi, int budget)
3218 3240
3219 work_done = bnx2_poll_work(bp, bnapi, work_done, budget); 3241 work_done = bnx2_poll_work(bp, bnapi, work_done, budget);
3220 3242
3221 if (unlikely(work_done >= budget))
3222 break;
3223
3224 /* bnapi->last_status_idx is used below to tell the hw how 3243 /* bnapi->last_status_idx is used below to tell the hw how
3225 * much work has been processed, so we must read it before 3244 * much work has been processed, so we must read it before
3226 * checking for more work. 3245 * checking for more work.
3227 */ 3246 */
3228 bnapi->last_status_idx = sblk->status_idx; 3247 bnapi->last_status_idx = sblk->status_idx;
3248
3249 if (unlikely(work_done >= budget))
3250 break;
3251
3229 rmb(); 3252 rmb();
3230 if (likely(!bnx2_has_work(bnapi))) { 3253 if (likely(!bnx2_has_work(bnapi))) {
3231 netif_rx_complete(bp->dev, napi); 3254 netif_rx_complete(bp->dev, napi);
@@ -4570,6 +4593,8 @@ bnx2_init_chip(struct bnx2 *bp)
4570 for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) 4593 for (i = 0; i < BNX2_MAX_MSIX_VEC; i++)
4571 bp->bnx2_napi[i].last_status_idx = 0; 4594 bp->bnx2_napi[i].last_status_idx = 0;
4572 4595
4596 bp->idle_chk_status_idx = 0xffff;
4597
4573 bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE; 4598 bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE;
4574 4599
4575 /* Set up how to generate a link change interrupt. */ 4600 /* Set up how to generate a link change interrupt. */
@@ -5718,6 +5743,10 @@ bnx2_timer(unsigned long data)
5718 if (atomic_read(&bp->intr_sem) != 0) 5743 if (atomic_read(&bp->intr_sem) != 0)
5719 goto bnx2_restart_timer; 5744 goto bnx2_restart_timer;
5720 5745
5746 if ((bp->flags & (BNX2_FLAG_USING_MSI | BNX2_FLAG_ONE_SHOT_MSI)) ==
5747 BNX2_FLAG_USING_MSI)
5748 bnx2_chk_missed_msi(bp);
5749
5721 bnx2_send_heart_beat(bp); 5750 bnx2_send_heart_beat(bp);
5722 5751
5723 bp->stats_blk->stat_FwRxDrop = 5752 bp->stats_blk->stat_FwRxDrop =
@@ -7204,10 +7233,13 @@ static void
7204poll_bnx2(struct net_device *dev) 7233poll_bnx2(struct net_device *dev)
7205{ 7234{
7206 struct bnx2 *bp = netdev_priv(dev); 7235 struct bnx2 *bp = netdev_priv(dev);
7236 int i;
7207 7237
7208 disable_irq(bp->pdev->irq); 7238 for (i = 0; i < bp->irq_nvecs; i++) {
7209 bnx2_interrupt(bp->pdev->irq, dev); 7239 disable_irq(bp->irq_tbl[i].vector);
7210 enable_irq(bp->pdev->irq); 7240 bnx2_interrupt(bp->irq_tbl[i].vector, &bp->bnx2_napi[i]);
7241 enable_irq(bp->irq_tbl[i].vector);
7242 }
7211} 7243}
7212#endif 7244#endif
7213 7245
diff --git a/drivers/net/bnx2.h b/drivers/net/bnx2.h
index 617d95340160..0b032c3c7b61 100644
--- a/drivers/net/bnx2.h
+++ b/drivers/net/bnx2.h
@@ -378,6 +378,9 @@ struct l2_fhdr {
378 * pci_config_l definition 378 * pci_config_l definition
379 * offset: 0000 379 * offset: 0000
380 */ 380 */
381#define BNX2_PCICFG_MSI_CONTROL 0x00000058
382#define BNX2_PCICFG_MSI_CONTROL_ENABLE (1L<<16)
383
381#define BNX2_PCICFG_MISC_CONFIG 0x00000068 384#define BNX2_PCICFG_MISC_CONFIG 0x00000068
382#define BNX2_PCICFG_MISC_CONFIG_TARGET_BYTE_SWAP (1L<<2) 385#define BNX2_PCICFG_MISC_CONFIG_TARGET_BYTE_SWAP (1L<<2)
383#define BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP (1L<<3) 386#define BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP (1L<<3)
@@ -6863,6 +6866,9 @@ struct bnx2 {
6863 6866
6864 u8 num_tx_rings; 6867 u8 num_tx_rings;
6865 u8 num_rx_rings; 6868 u8 num_rx_rings;
6869
6870 u32 idle_chk_status_idx;
6871
6866}; 6872};
6867 6873
6868#define REG_RD(bp, offset) \ 6874#define REG_RD(bp, offset) \
diff --git a/drivers/net/bnx2x_init.h b/drivers/net/bnx2x_init.h
index 130927cfc75b..a6c0b3abba29 100644
--- a/drivers/net/bnx2x_init.h
+++ b/drivers/net/bnx2x_init.h
@@ -564,14 +564,15 @@ static const struct arb_line write_arb_addr[NUM_WR_Q-1] = {
564 564
565static void bnx2x_init_pxp(struct bnx2x *bp) 565static void bnx2x_init_pxp(struct bnx2x *bp)
566{ 566{
567 u16 devctl;
567 int r_order, w_order; 568 int r_order, w_order;
568 u32 val, i; 569 u32 val, i;
569 570
570 pci_read_config_word(bp->pdev, 571 pci_read_config_word(bp->pdev,
571 bp->pcie_cap + PCI_EXP_DEVCTL, (u16 *)&val); 572 bp->pcie_cap + PCI_EXP_DEVCTL, &devctl);
572 DP(NETIF_MSG_HW, "read 0x%x from devctl\n", (u16)val); 573 DP(NETIF_MSG_HW, "read 0x%x from devctl\n", devctl);
573 w_order = ((val & PCI_EXP_DEVCTL_PAYLOAD) >> 5); 574 w_order = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
574 r_order = ((val & PCI_EXP_DEVCTL_READRQ) >> 12); 575 r_order = ((devctl & PCI_EXP_DEVCTL_READRQ) >> 12);
575 576
576 if (r_order > MAX_RD_ORD) { 577 if (r_order > MAX_RD_ORD) {
577 DP(NETIF_MSG_HW, "read order of %d order adjusted to %d\n", 578 DP(NETIF_MSG_HW, "read order of %d order adjusted to %d\n",
diff --git a/drivers/net/bnx2x_main.c b/drivers/net/bnx2x_main.c
index fce745148ff9..600210d7eff9 100644
--- a/drivers/net/bnx2x_main.c
+++ b/drivers/net/bnx2x_main.c
@@ -59,8 +59,8 @@
59#include "bnx2x.h" 59#include "bnx2x.h"
60#include "bnx2x_init.h" 60#include "bnx2x_init.h"
61 61
62#define DRV_MODULE_VERSION "1.45.22" 62#define DRV_MODULE_VERSION "1.45.23"
63#define DRV_MODULE_RELDATE "2008/09/09" 63#define DRV_MODULE_RELDATE "2008/11/03"
64#define BNX2X_BC_VER 0x040200 64#define BNX2X_BC_VER 0x040200
65 65
66/* Time in jiffies before concluding the transmitter is hung */ 66/* Time in jiffies before concluding the transmitter is hung */
@@ -6481,6 +6481,7 @@ load_int_disable:
6481 bnx2x_free_irq(bp); 6481 bnx2x_free_irq(bp);
6482load_error: 6482load_error:
6483 bnx2x_free_mem(bp); 6483 bnx2x_free_mem(bp);
6484 bp->port.pmf = 0;
6484 6485
6485 /* TBD we really need to reset the chip 6486 /* TBD we really need to reset the chip
6486 if we want to recover from this */ 6487 if we want to recover from this */
@@ -6791,6 +6792,7 @@ unload_error:
6791 /* Report UNLOAD_DONE to MCP */ 6792 /* Report UNLOAD_DONE to MCP */
6792 if (!BP_NOMCP(bp)) 6793 if (!BP_NOMCP(bp))
6793 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE); 6794 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
6795 bp->port.pmf = 0;
6794 6796
6795 /* Free SKBs, SGEs, TPA pool and driver internals */ 6797 /* Free SKBs, SGEs, TPA pool and driver internals */
6796 bnx2x_free_skbs(bp); 6798 bnx2x_free_skbs(bp);
@@ -10204,8 +10206,6 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev,
10204 return -ENOMEM; 10206 return -ENOMEM;
10205 } 10207 }
10206 10208
10207 netif_carrier_off(dev);
10208
10209 bp = netdev_priv(dev); 10209 bp = netdev_priv(dev);
10210 bp->msglevel = debug; 10210 bp->msglevel = debug;
10211 10211
@@ -10229,6 +10229,8 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev,
10229 goto init_one_exit; 10229 goto init_one_exit;
10230 } 10230 }
10231 10231
10232 netif_carrier_off(dev);
10233
10232 bp->common.name = board_info[ent->driver_data].name; 10234 bp->common.name = board_info[ent->driver_data].name;
10233 printk(KERN_INFO "%s: %s (%c%d) PCI-E x%d %s found at mem %lx," 10235 printk(KERN_INFO "%s: %s (%c%d) PCI-E x%d %s found at mem %lx,"
10234 " IRQ %d, ", dev->name, bp->common.name, 10236 " IRQ %d, ", dev->name, bp->common.name,
diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c
index ade5f3f6693b..87437c788476 100644
--- a/drivers/net/bonding/bond_alb.c
+++ b/drivers/net/bonding/bond_alb.c
@@ -169,11 +169,14 @@ static void tlb_clear_slave(struct bonding *bond, struct slave *slave, int save_
169 /* clear slave from tx_hashtbl */ 169 /* clear slave from tx_hashtbl */
170 tx_hash_table = BOND_ALB_INFO(bond).tx_hashtbl; 170 tx_hash_table = BOND_ALB_INFO(bond).tx_hashtbl;
171 171
172 index = SLAVE_TLB_INFO(slave).head; 172 /* skip this if we've already freed the tx hash table */
173 while (index != TLB_NULL_INDEX) { 173 if (tx_hash_table) {
174 u32 next_index = tx_hash_table[index].next; 174 index = SLAVE_TLB_INFO(slave).head;
175 tlb_init_table_entry(&tx_hash_table[index], save_load); 175 while (index != TLB_NULL_INDEX) {
176 index = next_index; 176 u32 next_index = tx_hash_table[index].next;
177 tlb_init_table_entry(&tx_hash_table[index], save_load);
178 index = next_index;
179 }
177 } 180 }
178 181
179 tlb_init_slave(slave); 182 tlb_init_slave(slave);
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 8e2be24f3fe4..a3efba59eee9 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -1341,18 +1341,24 @@ static int bond_compute_features(struct bonding *bond)
1341 int i; 1341 int i;
1342 1342
1343 features &= ~(NETIF_F_ALL_CSUM | BOND_VLAN_FEATURES); 1343 features &= ~(NETIF_F_ALL_CSUM | BOND_VLAN_FEATURES);
1344 features |= NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HIGHDMA | 1344 features |= NETIF_F_GSO_MASK | NETIF_F_NO_CSUM;
1345 NETIF_F_GSO_MASK | NETIF_F_NO_CSUM; 1345
1346 if (!bond->first_slave)
1347 goto done;
1348
1349 features &= ~NETIF_F_ONE_FOR_ALL;
1346 1350
1347 bond_for_each_slave(bond, slave, i) { 1351 bond_for_each_slave(bond, slave, i) {
1348 features = netdev_compute_features(features, 1352 features = netdev_increment_features(features,
1349 slave->dev->features); 1353 slave->dev->features,
1354 NETIF_F_ONE_FOR_ALL);
1350 if (slave->dev->hard_header_len > max_hard_header_len) 1355 if (slave->dev->hard_header_len > max_hard_header_len)
1351 max_hard_header_len = slave->dev->hard_header_len; 1356 max_hard_header_len = slave->dev->hard_header_len;
1352 } 1357 }
1353 1358
1359done:
1354 features |= (bond_dev->features & BOND_VLAN_FEATURES); 1360 features |= (bond_dev->features & BOND_VLAN_FEATURES);
1355 bond_dev->features = features; 1361 bond_dev->features = netdev_fix_features(features, NULL);
1356 bond_dev->hard_header_len = max_hard_header_len; 1362 bond_dev->hard_header_len = max_hard_header_len;
1357 1363
1358 return 0; 1364 return 0;
@@ -1973,6 +1979,20 @@ void bond_destroy(struct bonding *bond)
1973 unregister_netdevice(bond->dev); 1979 unregister_netdevice(bond->dev);
1974} 1980}
1975 1981
1982static void bond_destructor(struct net_device *bond_dev)
1983{
1984 struct bonding *bond = bond_dev->priv;
1985
1986 if (bond->wq)
1987 destroy_workqueue(bond->wq);
1988
1989 netif_addr_lock_bh(bond_dev);
1990 bond_mc_list_destroy(bond);
1991 netif_addr_unlock_bh(bond_dev);
1992
1993 free_netdev(bond_dev);
1994}
1995
1976/* 1996/*
1977* First release a slave and then destroy the bond if no more slaves are left. 1997* First release a slave and then destroy the bond if no more slaves are left.
1978* Must be under rtnl_lock when this function is called. 1998* Must be under rtnl_lock when this function is called.
@@ -2370,6 +2390,9 @@ static void bond_miimon_commit(struct bonding *bond)
2370 continue; 2390 continue;
2371 2391
2372 case BOND_LINK_DOWN: 2392 case BOND_LINK_DOWN:
2393 if (slave->link_failure_count < UINT_MAX)
2394 slave->link_failure_count++;
2395
2373 slave->link = BOND_LINK_DOWN; 2396 slave->link = BOND_LINK_DOWN;
2374 2397
2375 if (bond->params.mode == BOND_MODE_ACTIVEBACKUP || 2398 if (bond->params.mode == BOND_MODE_ACTIVEBACKUP ||
@@ -4544,7 +4567,7 @@ static int bond_init(struct net_device *bond_dev, struct bond_params *params)
4544 4567
4545 bond_set_mode_ops(bond, bond->params.mode); 4568 bond_set_mode_ops(bond, bond->params.mode);
4546 4569
4547 bond_dev->destructor = free_netdev; 4570 bond_dev->destructor = bond_destructor;
4548 4571
4549 /* Initialize the device options */ 4572 /* Initialize the device options */
4550 bond_dev->tx_queue_len = 0; 4573 bond_dev->tx_queue_len = 0;
@@ -4583,20 +4606,6 @@ static int bond_init(struct net_device *bond_dev, struct bond_params *params)
4583 return 0; 4606 return 0;
4584} 4607}
4585 4608
4586/* De-initialize device specific data.
4587 * Caller must hold rtnl_lock.
4588 */
4589static void bond_deinit(struct net_device *bond_dev)
4590{
4591 struct bonding *bond = bond_dev->priv;
4592
4593 list_del(&bond->bond_list);
4594
4595#ifdef CONFIG_PROC_FS
4596 bond_remove_proc_entry(bond);
4597#endif
4598}
4599
4600static void bond_work_cancel_all(struct bonding *bond) 4609static void bond_work_cancel_all(struct bonding *bond)
4601{ 4610{
4602 write_lock_bh(&bond->lock); 4611 write_lock_bh(&bond->lock);
@@ -4618,6 +4627,22 @@ static void bond_work_cancel_all(struct bonding *bond)
4618 cancel_delayed_work(&bond->ad_work); 4627 cancel_delayed_work(&bond->ad_work);
4619} 4628}
4620 4629
4630/* De-initialize device specific data.
4631 * Caller must hold rtnl_lock.
4632 */
4633static void bond_deinit(struct net_device *bond_dev)
4634{
4635 struct bonding *bond = bond_dev->priv;
4636
4637 list_del(&bond->bond_list);
4638
4639 bond_work_cancel_all(bond);
4640
4641#ifdef CONFIG_PROC_FS
4642 bond_remove_proc_entry(bond);
4643#endif
4644}
4645
4621/* Unregister and free all bond devices. 4646/* Unregister and free all bond devices.
4622 * Caller must hold rtnl_lock. 4647 * Caller must hold rtnl_lock.
4623 */ 4648 */
@@ -4629,9 +4654,6 @@ static void bond_free_all(void)
4629 struct net_device *bond_dev = bond->dev; 4654 struct net_device *bond_dev = bond->dev;
4630 4655
4631 bond_work_cancel_all(bond); 4656 bond_work_cancel_all(bond);
4632 netif_addr_lock_bh(bond_dev);
4633 bond_mc_list_destroy(bond);
4634 netif_addr_unlock_bh(bond_dev);
4635 /* Release the bonded slaves */ 4657 /* Release the bonded slaves */
4636 bond_release_all(bond_dev); 4658 bond_release_all(bond_dev);
4637 bond_destroy(bond); 4659 bond_destroy(bond);
diff --git a/drivers/net/chelsio/sge.c b/drivers/net/chelsio/sge.c
index d6c7d2aa761b..7092df50ff78 100644
--- a/drivers/net/chelsio/sge.c
+++ b/drivers/net/chelsio/sge.c
@@ -1035,10 +1035,6 @@ MODULE_PARM_DESC(copybreak, "Receive copy threshold");
1035 * @pdev: the PCI device that received the packet 1035 * @pdev: the PCI device that received the packet
1036 * @fl: the SGE free list holding the packet 1036 * @fl: the SGE free list holding the packet
1037 * @len: the actual packet length, excluding any SGE padding 1037 * @len: the actual packet length, excluding any SGE padding
1038 * @dma_pad: padding at beginning of buffer left by SGE DMA
1039 * @skb_pad: padding to be used if the packet is copied
1040 * @copy_thres: length threshold under which a packet should be copied
1041 * @drop_thres: # of remaining buffers before we start dropping packets
1042 * 1038 *
1043 * Get the next packet from a free list and complete setup of the 1039 * Get the next packet from a free list and complete setup of the
1044 * sk_buff. If the packet is small we make a copy and recycle the 1040 * sk_buff. If the packet is small we make a copy and recycle the
diff --git a/drivers/net/cris/eth_v10.c b/drivers/net/cris/eth_v10.c
index 65d0a9103297..7e8a63106bdf 100644
--- a/drivers/net/cris/eth_v10.c
+++ b/drivers/net/cris/eth_v10.c
@@ -32,14 +32,14 @@
32#include <linux/skbuff.h> 32#include <linux/skbuff.h>
33#include <linux/ethtool.h> 33#include <linux/ethtool.h>
34 34
35#include <asm/arch/svinto.h>/* DMA and register descriptions */ 35#include <arch/svinto.h>/* DMA and register descriptions */
36#include <asm/io.h> /* CRIS_LED_* I/O functions */ 36#include <asm/io.h> /* CRIS_LED_* I/O functions */
37#include <asm/irq.h> 37#include <asm/irq.h>
38#include <asm/dma.h> 38#include <asm/dma.h>
39#include <asm/system.h> 39#include <asm/system.h>
40#include <asm/ethernet.h> 40#include <asm/ethernet.h>
41#include <asm/cache.h> 41#include <asm/cache.h>
42#include <asm/arch/io_interface_mux.h> 42#include <arch/io_interface_mux.h>
43 43
44//#define ETHDEBUG 44//#define ETHDEBUG
45#define D(x) 45#define D(x)
diff --git a/drivers/net/cxgb3/cxgb3_main.c b/drivers/net/cxgb3/cxgb3_main.c
index 1ace41a13ac3..2c341f83d327 100644
--- a/drivers/net/cxgb3/cxgb3_main.c
+++ b/drivers/net/cxgb3/cxgb3_main.c
@@ -1307,8 +1307,10 @@ static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
1307 u32 fw_vers = 0; 1307 u32 fw_vers = 0;
1308 u32 tp_vers = 0; 1308 u32 tp_vers = 0;
1309 1309
1310 spin_lock(&adapter->stats_lock);
1310 t3_get_fw_version(adapter, &fw_vers); 1311 t3_get_fw_version(adapter, &fw_vers);
1311 t3_get_tp_version(adapter, &tp_vers); 1312 t3_get_tp_version(adapter, &tp_vers);
1313 spin_unlock(&adapter->stats_lock);
1312 1314
1313 strcpy(info->driver, DRV_NAME); 1315 strcpy(info->driver, DRV_NAME);
1314 strcpy(info->version, DRV_VERSION); 1316 strcpy(info->version, DRV_VERSION);
@@ -2699,7 +2701,7 @@ static void set_nqsets(struct adapter *adap)
2699 int hwports = adap->params.nports; 2701 int hwports = adap->params.nports;
2700 int nqsets = SGE_QSETS; 2702 int nqsets = SGE_QSETS;
2701 2703
2702 if (adap->params.rev > 0) { 2704 if (adap->params.rev > 0 && adap->flags & USING_MSIX) {
2703 if (hwports == 2 && 2705 if (hwports == 2 &&
2704 (hwports * nqsets > SGE_QSETS || 2706 (hwports * nqsets > SGE_QSETS ||
2705 num_cpus >= nqsets / hwports)) 2707 num_cpus >= nqsets / hwports))
diff --git a/drivers/net/cxgb3/l2t.c b/drivers/net/cxgb3/l2t.c
index 4407ac9bb555..ff1611f90e7a 100644
--- a/drivers/net/cxgb3/l2t.c
+++ b/drivers/net/cxgb3/l2t.c
@@ -431,6 +431,7 @@ struct l2t_data *t3_init_l2t(unsigned int l2t_capacity)
431 for (i = 0; i < l2t_capacity; ++i) { 431 for (i = 0; i < l2t_capacity; ++i) {
432 d->l2tab[i].idx = i; 432 d->l2tab[i].idx = i;
433 d->l2tab[i].state = L2T_STATE_UNUSED; 433 d->l2tab[i].state = L2T_STATE_UNUSED;
434 __skb_queue_head_init(&d->l2tab[i].arpq);
434 spin_lock_init(&d->l2tab[i].lock); 435 spin_lock_init(&d->l2tab[i].lock);
435 atomic_set(&d->l2tab[i].refcnt, 0); 436 atomic_set(&d->l2tab[i].refcnt, 0);
436 } 437 }
diff --git a/drivers/net/cxgb3/t3_hw.c b/drivers/net/cxgb3/t3_hw.c
index 968f64be3743..9a0898b0dbce 100644
--- a/drivers/net/cxgb3/t3_hw.c
+++ b/drivers/net/cxgb3/t3_hw.c
@@ -572,7 +572,7 @@ struct t3_vpd {
572 u32 pad; /* for multiple-of-4 sizing and alignment */ 572 u32 pad; /* for multiple-of-4 sizing and alignment */
573}; 573};
574 574
575#define EEPROM_MAX_POLL 4 575#define EEPROM_MAX_POLL 40
576#define EEPROM_STAT_ADDR 0x4000 576#define EEPROM_STAT_ADDR 0x4000
577#define VPD_BASE 0xc00 577#define VPD_BASE 0xc00
578 578
@@ -3690,6 +3690,12 @@ int t3_prep_adapter(struct adapter *adapter, const struct adapter_info *ai,
3690 ; 3690 ;
3691 3691
3692 pti = &port_types[adapter->params.vpd.port_type[j]]; 3692 pti = &port_types[adapter->params.vpd.port_type[j]];
3693 if (!pti->phy_prep) {
3694 CH_ALERT(adapter, "Invalid port type index %d\n",
3695 adapter->params.vpd.port_type[j]);
3696 return -EINVAL;
3697 }
3698
3693 ret = pti->phy_prep(&p->phy, adapter, ai->phy_base_addr + j, 3699 ret = pti->phy_prep(&p->phy, adapter, ai->phy_base_addr + j,
3694 ai->mdio_ops); 3700 ai->mdio_ops);
3695 if (ret) 3701 if (ret)
diff --git a/drivers/net/dm9000.c b/drivers/net/dm9000.c
index f42c23f42652..5a9083e3f443 100644
--- a/drivers/net/dm9000.c
+++ b/drivers/net/dm9000.c
@@ -47,15 +47,6 @@
47#define CARDNAME "dm9000" 47#define CARDNAME "dm9000"
48#define DRV_VERSION "1.31" 48#define DRV_VERSION "1.31"
49 49
50#ifdef CONFIG_BLACKFIN
51#define readsb insb
52#define readsw insw
53#define readsl insl
54#define writesb outsb
55#define writesw outsw
56#define writesl outsl
57#endif
58
59/* 50/*
60 * Transmit timeout, default 5 seconds. 51 * Transmit timeout, default 5 seconds.
61 */ 52 */
diff --git a/drivers/net/e100.c b/drivers/net/e100.c
index 3d69fae781cf..e8bfcce6b319 100644
--- a/drivers/net/e100.c
+++ b/drivers/net/e100.c
@@ -166,7 +166,7 @@
166 166
167#define DRV_NAME "e100" 167#define DRV_NAME "e100"
168#define DRV_EXT "-NAPI" 168#define DRV_EXT "-NAPI"
169#define DRV_VERSION "3.5.23-k4"DRV_EXT 169#define DRV_VERSION "3.5.23-k6"DRV_EXT
170#define DRV_DESCRIPTION "Intel(R) PRO/100 Network Driver" 170#define DRV_DESCRIPTION "Intel(R) PRO/100 Network Driver"
171#define DRV_COPYRIGHT "Copyright(c) 1999-2006 Intel Corporation" 171#define DRV_COPYRIGHT "Copyright(c) 1999-2006 Intel Corporation"
172#define PFX DRV_NAME ": " 172#define PFX DRV_NAME ": "
@@ -1804,7 +1804,7 @@ static int e100_rx_alloc_skb(struct nic *nic, struct rx *rx)
1804 struct rfd *prev_rfd = (struct rfd *)rx->prev->skb->data; 1804 struct rfd *prev_rfd = (struct rfd *)rx->prev->skb->data;
1805 put_unaligned_le32(rx->dma_addr, &prev_rfd->link); 1805 put_unaligned_le32(rx->dma_addr, &prev_rfd->link);
1806 pci_dma_sync_single_for_device(nic->pdev, rx->prev->dma_addr, 1806 pci_dma_sync_single_for_device(nic->pdev, rx->prev->dma_addr,
1807 sizeof(struct rfd), PCI_DMA_TODEVICE); 1807 sizeof(struct rfd), PCI_DMA_BIDIRECTIONAL);
1808 } 1808 }
1809 1809
1810 return 0; 1810 return 0;
@@ -1823,7 +1823,7 @@ static int e100_rx_indicate(struct nic *nic, struct rx *rx,
1823 1823
1824 /* Need to sync before taking a peek at cb_complete bit */ 1824 /* Need to sync before taking a peek at cb_complete bit */
1825 pci_dma_sync_single_for_cpu(nic->pdev, rx->dma_addr, 1825 pci_dma_sync_single_for_cpu(nic->pdev, rx->dma_addr,
1826 sizeof(struct rfd), PCI_DMA_FROMDEVICE); 1826 sizeof(struct rfd), PCI_DMA_BIDIRECTIONAL);
1827 rfd_status = le16_to_cpu(rfd->status); 1827 rfd_status = le16_to_cpu(rfd->status);
1828 1828
1829 DPRINTK(RX_STATUS, DEBUG, "status=0x%04X\n", rfd_status); 1829 DPRINTK(RX_STATUS, DEBUG, "status=0x%04X\n", rfd_status);
@@ -1850,7 +1850,7 @@ static int e100_rx_indicate(struct nic *nic, struct rx *rx,
1850 1850
1851 /* Get data */ 1851 /* Get data */
1852 pci_unmap_single(nic->pdev, rx->dma_addr, 1852 pci_unmap_single(nic->pdev, rx->dma_addr,
1853 RFD_BUF_LEN, PCI_DMA_FROMDEVICE); 1853 RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL);
1854 1854
1855 /* If this buffer has the el bit, but we think the receiver 1855 /* If this buffer has the el bit, but we think the receiver
1856 * is still running, check to see if it really stopped while 1856 * is still running, check to see if it really stopped while
@@ -1943,7 +1943,7 @@ static void e100_rx_clean(struct nic *nic, unsigned int *work_done,
1943 new_before_last_rfd->command |= cpu_to_le16(cb_el); 1943 new_before_last_rfd->command |= cpu_to_le16(cb_el);
1944 pci_dma_sync_single_for_device(nic->pdev, 1944 pci_dma_sync_single_for_device(nic->pdev,
1945 new_before_last_rx->dma_addr, sizeof(struct rfd), 1945 new_before_last_rx->dma_addr, sizeof(struct rfd),
1946 PCI_DMA_TODEVICE); 1946 PCI_DMA_BIDIRECTIONAL);
1947 1947
1948 /* Now that we have a new stopping point, we can clear the old 1948 /* Now that we have a new stopping point, we can clear the old
1949 * stopping point. We must sync twice to get the proper 1949 * stopping point. We must sync twice to get the proper
@@ -1951,11 +1951,11 @@ static void e100_rx_clean(struct nic *nic, unsigned int *work_done,
1951 old_before_last_rfd->command &= ~cpu_to_le16(cb_el); 1951 old_before_last_rfd->command &= ~cpu_to_le16(cb_el);
1952 pci_dma_sync_single_for_device(nic->pdev, 1952 pci_dma_sync_single_for_device(nic->pdev,
1953 old_before_last_rx->dma_addr, sizeof(struct rfd), 1953 old_before_last_rx->dma_addr, sizeof(struct rfd),
1954 PCI_DMA_TODEVICE); 1954 PCI_DMA_BIDIRECTIONAL);
1955 old_before_last_rfd->size = cpu_to_le16(VLAN_ETH_FRAME_LEN); 1955 old_before_last_rfd->size = cpu_to_le16(VLAN_ETH_FRAME_LEN);
1956 pci_dma_sync_single_for_device(nic->pdev, 1956 pci_dma_sync_single_for_device(nic->pdev,
1957 old_before_last_rx->dma_addr, sizeof(struct rfd), 1957 old_before_last_rx->dma_addr, sizeof(struct rfd),
1958 PCI_DMA_TODEVICE); 1958 PCI_DMA_BIDIRECTIONAL);
1959 } 1959 }
1960 1960
1961 if(restart_required) { 1961 if(restart_required) {
@@ -1978,7 +1978,7 @@ static void e100_rx_clean_list(struct nic *nic)
1978 for(rx = nic->rxs, i = 0; i < count; rx++, i++) { 1978 for(rx = nic->rxs, i = 0; i < count; rx++, i++) {
1979 if(rx->skb) { 1979 if(rx->skb) {
1980 pci_unmap_single(nic->pdev, rx->dma_addr, 1980 pci_unmap_single(nic->pdev, rx->dma_addr,
1981 RFD_BUF_LEN, PCI_DMA_FROMDEVICE); 1981 RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL);
1982 dev_kfree_skb(rx->skb); 1982 dev_kfree_skb(rx->skb);
1983 } 1983 }
1984 } 1984 }
@@ -2021,7 +2021,7 @@ static int e100_rx_alloc_list(struct nic *nic)
2021 before_last->command |= cpu_to_le16(cb_el); 2021 before_last->command |= cpu_to_le16(cb_el);
2022 before_last->size = 0; 2022 before_last->size = 0;
2023 pci_dma_sync_single_for_device(nic->pdev, rx->dma_addr, 2023 pci_dma_sync_single_for_device(nic->pdev, rx->dma_addr,
2024 sizeof(struct rfd), PCI_DMA_TODEVICE); 2024 sizeof(struct rfd), PCI_DMA_BIDIRECTIONAL);
2025 2025
2026 nic->rx_to_use = nic->rx_to_clean = nic->rxs; 2026 nic->rx_to_use = nic->rx_to_clean = nic->rxs;
2027 nic->ru_running = RU_SUSPENDED; 2027 nic->ru_running = RU_SUSPENDED;
@@ -2222,7 +2222,7 @@ static int e100_loopback_test(struct nic *nic, enum loopback loopback_mode)
2222 msleep(10); 2222 msleep(10);
2223 2223
2224 pci_dma_sync_single_for_cpu(nic->pdev, nic->rx_to_clean->dma_addr, 2224 pci_dma_sync_single_for_cpu(nic->pdev, nic->rx_to_clean->dma_addr,
2225 RFD_BUF_LEN, PCI_DMA_FROMDEVICE); 2225 RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL);
2226 2226
2227 if(memcmp(nic->rx_to_clean->skb->data + sizeof(struct rfd), 2227 if(memcmp(nic->rx_to_clean->skb->data + sizeof(struct rfd),
2228 skb->data, ETH_DATA_LEN)) 2228 skb->data, ETH_DATA_LEN))
diff --git a/drivers/net/e1000/e1000_ethtool.c b/drivers/net/e1000/e1000_ethtool.c
index 6a3893acfe04..c854c96f5ab3 100644
--- a/drivers/net/e1000/e1000_ethtool.c
+++ b/drivers/net/e1000/e1000_ethtool.c
@@ -1774,7 +1774,8 @@ static void e1000_get_wol(struct net_device *netdev,
1774 1774
1775 /* this function will set ->supported = 0 and return 1 if wol is not 1775 /* this function will set ->supported = 0 and return 1 if wol is not
1776 * supported by this hardware */ 1776 * supported by this hardware */
1777 if (e1000_wol_exclusion(adapter, wol)) 1777 if (e1000_wol_exclusion(adapter, wol) ||
1778 !device_can_wakeup(&adapter->pdev->dev))
1778 return; 1779 return;
1779 1780
1780 /* apply any specific unsupported masks here */ 1781 /* apply any specific unsupported masks here */
@@ -1811,7 +1812,8 @@ static int e1000_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
1811 if (wol->wolopts & (WAKE_PHY | WAKE_ARP | WAKE_MAGICSECURE)) 1812 if (wol->wolopts & (WAKE_PHY | WAKE_ARP | WAKE_MAGICSECURE))
1812 return -EOPNOTSUPP; 1813 return -EOPNOTSUPP;
1813 1814
1814 if (e1000_wol_exclusion(adapter, wol)) 1815 if (e1000_wol_exclusion(adapter, wol) ||
1816 !device_can_wakeup(&adapter->pdev->dev))
1815 return wol->wolopts ? -EOPNOTSUPP : 0; 1817 return wol->wolopts ? -EOPNOTSUPP : 0;
1816 1818
1817 switch (hw->device_id) { 1819 switch (hw->device_id) {
@@ -1838,6 +1840,8 @@ static int e1000_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
1838 if (wol->wolopts & WAKE_MAGIC) 1840 if (wol->wolopts & WAKE_MAGIC)
1839 adapter->wol |= E1000_WUFC_MAG; 1841 adapter->wol |= E1000_WUFC_MAG;
1840 1842
1843 device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
1844
1841 return 0; 1845 return 0;
1842} 1846}
1843 1847
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
index fac82152e4c8..872799b746f5 100644
--- a/drivers/net/e1000/e1000_main.c
+++ b/drivers/net/e1000/e1000_main.c
@@ -1179,6 +1179,7 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
1179 1179
1180 /* initialize the wol settings based on the eeprom settings */ 1180 /* initialize the wol settings based on the eeprom settings */
1181 adapter->wol = adapter->eeprom_wol; 1181 adapter->wol = adapter->eeprom_wol;
1182 device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
1182 1183
1183 /* print bus type/speed/width info */ 1184 /* print bus type/speed/width info */
1184 DPRINTK(PROBE, INFO, "(PCI%s:%s:%s) ", 1185 DPRINTK(PROBE, INFO, "(PCI%s:%s:%s) ",
diff --git a/drivers/net/e1000e/e1000.h b/drivers/net/e1000e/e1000.h
index c55de1c027af..c55fd6fdb91c 100644
--- a/drivers/net/e1000e/e1000.h
+++ b/drivers/net/e1000e/e1000.h
@@ -299,6 +299,7 @@ struct e1000_adapter {
299 unsigned long led_status; 299 unsigned long led_status;
300 300
301 unsigned int flags; 301 unsigned int flags;
302 unsigned int flags2;
302 struct work_struct downshift_task; 303 struct work_struct downshift_task;
303 struct work_struct update_phy_task; 304 struct work_struct update_phy_task;
304}; 305};
@@ -306,6 +307,7 @@ struct e1000_adapter {
306struct e1000_info { 307struct e1000_info {
307 enum e1000_mac_type mac; 308 enum e1000_mac_type mac;
308 unsigned int flags; 309 unsigned int flags;
310 unsigned int flags2;
309 u32 pba; 311 u32 pba;
310 s32 (*get_variants)(struct e1000_adapter *); 312 s32 (*get_variants)(struct e1000_adapter *);
311 struct e1000_mac_operations *mac_ops; 313 struct e1000_mac_operations *mac_ops;
@@ -347,6 +349,9 @@ struct e1000_info {
347#define FLAG_RX_RESTART_NOW (1 << 30) 349#define FLAG_RX_RESTART_NOW (1 << 30)
348#define FLAG_MSI_TEST_FAILED (1 << 31) 350#define FLAG_MSI_TEST_FAILED (1 << 31)
349 351
352/* CRC Stripping defines */
353#define FLAG2_CRC_STRIPPING (1 << 0)
354
350#define E1000_RX_DESC_PS(R, i) \ 355#define E1000_RX_DESC_PS(R, i) \
351 (&(((union e1000_rx_desc_packet_split *)((R).desc))[i])) 356 (&(((union e1000_rx_desc_packet_split *)((R).desc))[i]))
352#define E1000_GET_DESC(R, i, type) (&(((struct type *)((R).desc))[i])) 357#define E1000_GET_DESC(R, i, type) (&(((struct type *)((R).desc))[i]))
diff --git a/drivers/net/e1000e/ethtool.c b/drivers/net/e1000e/ethtool.c
index 70c11c811a08..62421ce96311 100644
--- a/drivers/net/e1000e/ethtool.c
+++ b/drivers/net/e1000e/ethtool.c
@@ -1713,7 +1713,8 @@ static void e1000_get_wol(struct net_device *netdev,
1713 wol->supported = 0; 1713 wol->supported = 0;
1714 wol->wolopts = 0; 1714 wol->wolopts = 0;
1715 1715
1716 if (!(adapter->flags & FLAG_HAS_WOL)) 1716 if (!(adapter->flags & FLAG_HAS_WOL) ||
1717 !device_can_wakeup(&adapter->pdev->dev))
1717 return; 1718 return;
1718 1719
1719 wol->supported = WAKE_UCAST | WAKE_MCAST | 1720 wol->supported = WAKE_UCAST | WAKE_MCAST |
@@ -1751,7 +1752,8 @@ static int e1000_set_wol(struct net_device *netdev,
1751 if (wol->wolopts & WAKE_MAGICSECURE) 1752 if (wol->wolopts & WAKE_MAGICSECURE)
1752 return -EOPNOTSUPP; 1753 return -EOPNOTSUPP;
1753 1754
1754 if (!(adapter->flags & FLAG_HAS_WOL)) 1755 if (!(adapter->flags & FLAG_HAS_WOL) ||
1756 !device_can_wakeup(&adapter->pdev->dev))
1755 return wol->wolopts ? -EOPNOTSUPP : 0; 1757 return wol->wolopts ? -EOPNOTSUPP : 0;
1756 1758
1757 /* these settings will always override what we currently have */ 1759 /* these settings will always override what we currently have */
@@ -1770,6 +1772,8 @@ static int e1000_set_wol(struct net_device *netdev,
1770 if (wol->wolopts & WAKE_ARP) 1772 if (wol->wolopts & WAKE_ARP)
1771 adapter->wol |= E1000_WUFC_ARP; 1773 adapter->wol |= E1000_WUFC_ARP;
1772 1774
1775 device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
1776
1773 return 0; 1777 return 0;
1774} 1778}
1775 1779
diff --git a/drivers/net/e1000e/ich8lan.c b/drivers/net/e1000e/ich8lan.c
index 523b9716a543..d115a6d30f29 100644
--- a/drivers/net/e1000e/ich8lan.c
+++ b/drivers/net/e1000e/ich8lan.c
@@ -1893,12 +1893,17 @@ static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw)
1893 ctrl |= E1000_CTRL_PHY_RST; 1893 ctrl |= E1000_CTRL_PHY_RST;
1894 } 1894 }
1895 ret_val = e1000_acquire_swflag_ich8lan(hw); 1895 ret_val = e1000_acquire_swflag_ich8lan(hw);
1896 /* Whether or not the swflag was acquired, we need to reset the part */
1896 hw_dbg(hw, "Issuing a global reset to ich8lan"); 1897 hw_dbg(hw, "Issuing a global reset to ich8lan");
1897 ew32(CTRL, (ctrl | E1000_CTRL_RST)); 1898 ew32(CTRL, (ctrl | E1000_CTRL_RST));
1898 msleep(20); 1899 msleep(20);
1899 1900
1900 /* release the swflag because it is not reset by hardware reset */ 1901 if (!ret_val) {
1901 e1000_release_swflag_ich8lan(hw); 1902 /* release the swflag because it is not reset by
1903 * hardware reset
1904 */
1905 e1000_release_swflag_ich8lan(hw);
1906 }
1902 1907
1903 ret_val = e1000e_get_auto_rd_done(hw); 1908 ret_val = e1000e_get_auto_rd_done(hw);
1904 if (ret_val) { 1909 if (ret_val) {
diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c
index abd492b7336d..122539a0e1fe 100644
--- a/drivers/net/e1000e/netdev.c
+++ b/drivers/net/e1000e/netdev.c
@@ -345,7 +345,6 @@ no_buffers:
345/** 345/**
346 * e1000_alloc_jumbo_rx_buffers - Replace used jumbo receive buffers 346 * e1000_alloc_jumbo_rx_buffers - Replace used jumbo receive buffers
347 * @adapter: address of board private structure 347 * @adapter: address of board private structure
348 * @rx_ring: pointer to receive ring structure
349 * @cleaned_count: number of buffers to allocate this pass 348 * @cleaned_count: number of buffers to allocate this pass
350 **/ 349 **/
351 350
@@ -499,6 +498,10 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
499 goto next_desc; 498 goto next_desc;
500 } 499 }
501 500
501 /* adjust length to remove Ethernet CRC */
502 if (!(adapter->flags2 & FLAG2_CRC_STRIPPING))
503 length -= 4;
504
502 total_rx_bytes += length; 505 total_rx_bytes += length;
503 total_rx_packets++; 506 total_rx_packets++;
504 507
@@ -804,6 +807,10 @@ static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
804 pci_dma_sync_single_for_device(pdev, ps_page->dma, 807 pci_dma_sync_single_for_device(pdev, ps_page->dma,
805 PAGE_SIZE, PCI_DMA_FROMDEVICE); 808 PAGE_SIZE, PCI_DMA_FROMDEVICE);
806 809
810 /* remove the CRC */
811 if (!(adapter->flags2 & FLAG2_CRC_STRIPPING))
812 l1 -= 4;
813
807 skb_put(skb, l1); 814 skb_put(skb, l1);
808 goto copydone; 815 goto copydone;
809 } /* if */ 816 } /* if */
@@ -825,6 +832,12 @@ static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
825 skb->truesize += length; 832 skb->truesize += length;
826 } 833 }
827 834
835 /* strip the ethernet crc, problem is we're using pages now so
836 * this whole operation can get a little cpu intensive
837 */
838 if (!(adapter->flags2 & FLAG2_CRC_STRIPPING))
839 pskb_trim(skb, skb->len - 4);
840
828copydone: 841copydone:
829 total_rx_bytes += skb->len; 842 total_rx_bytes += skb->len;
830 total_rx_packets++; 843 total_rx_packets++;
@@ -2301,8 +2314,12 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter)
2301 else 2314 else
2302 rctl |= E1000_RCTL_LPE; 2315 rctl |= E1000_RCTL_LPE;
2303 2316
2304 /* Enable hardware CRC frame stripping */ 2317 /* Some systems expect that the CRC is included in SMBUS traffic. The
2305 rctl |= E1000_RCTL_SECRC; 2318 * hardware strips the CRC before sending to both SMBUS (BMC) and to
2319 * host memory when this is enabled
2320 */
2321 if (adapter->flags2 & FLAG2_CRC_STRIPPING)
2322 rctl |= E1000_RCTL_SECRC;
2306 2323
2307 /* Setup buffer sizes */ 2324 /* Setup buffer sizes */
2308 rctl &= ~E1000_RCTL_SZ_4096; 2325 rctl &= ~E1000_RCTL_SZ_4096;
@@ -4766,6 +4783,7 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
4766 adapter->ei = ei; 4783 adapter->ei = ei;
4767 adapter->pba = ei->pba; 4784 adapter->pba = ei->pba;
4768 adapter->flags = ei->flags; 4785 adapter->flags = ei->flags;
4786 adapter->flags2 = ei->flags2;
4769 adapter->hw.adapter = adapter; 4787 adapter->hw.adapter = adapter;
4770 adapter->hw.mac.type = ei->mac; 4788 adapter->hw.mac.type = ei->mac;
4771 adapter->msg_enable = (1 << NETIF_MSG_DRV | NETIF_MSG_PROBE) - 1; 4789 adapter->msg_enable = (1 << NETIF_MSG_DRV | NETIF_MSG_PROBE) - 1;
@@ -4970,6 +4988,7 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
4970 4988
4971 /* initialize the wol settings based on the eeprom settings */ 4989 /* initialize the wol settings based on the eeprom settings */
4972 adapter->wol = adapter->eeprom_wol; 4990 adapter->wol = adapter->eeprom_wol;
4991 device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
4973 4992
4974 /* reset the hardware with the new settings */ 4993 /* reset the hardware with the new settings */
4975 e1000e_reset(adapter); 4994 e1000e_reset(adapter);
@@ -5008,6 +5027,7 @@ err_hw_init:
5008err_sw_init: 5027err_sw_init:
5009 if (adapter->hw.flash_address) 5028 if (adapter->hw.flash_address)
5010 iounmap(adapter->hw.flash_address); 5029 iounmap(adapter->hw.flash_address);
5030 e1000e_reset_interrupt_capability(adapter);
5011err_flashmap: 5031err_flashmap:
5012 iounmap(adapter->hw.hw_addr); 5032 iounmap(adapter->hw.hw_addr);
5013err_ioremap: 5033err_ioremap:
diff --git a/drivers/net/e1000e/param.c b/drivers/net/e1000e/param.c
index 77a3d7207a5f..e909f96698e8 100644
--- a/drivers/net/e1000e/param.c
+++ b/drivers/net/e1000e/param.c
@@ -151,6 +151,16 @@ E1000_PARAM(KumeranLockLoss, "Enable Kumeran lock loss workaround");
151 */ 151 */
152E1000_PARAM(WriteProtectNVM, "Write-protect NVM [WARNING: disabling this can lead to corrupted NVM]"); 152E1000_PARAM(WriteProtectNVM, "Write-protect NVM [WARNING: disabling this can lead to corrupted NVM]");
153 153
154/*
155 * Enable CRC Stripping
156 *
157 * Valid Range: 0, 1
158 *
159 * Default Value: 1 (enabled)
160 */
161E1000_PARAM(CrcStripping, "Enable CRC Stripping, disable if your BMC needs " \
162 "the CRC");
163
154struct e1000_option { 164struct e1000_option {
155 enum { enable_option, range_option, list_option } type; 165 enum { enable_option, range_option, list_option } type;
156 const char *name; 166 const char *name;
@@ -404,6 +414,21 @@ void __devinit e1000e_check_options(struct e1000_adapter *adapter)
404 adapter->flags |= FLAG_SMART_POWER_DOWN; 414 adapter->flags |= FLAG_SMART_POWER_DOWN;
405 } 415 }
406 } 416 }
417 { /* CRC Stripping */
418 const struct e1000_option opt = {
419 .type = enable_option,
420 .name = "CRC Stripping",
421 .err = "defaulting to enabled",
422 .def = OPTION_ENABLED
423 };
424
425 if (num_CrcStripping > bd) {
426 unsigned int crc_stripping = CrcStripping[bd];
427 e1000_validate_option(&crc_stripping, &opt, adapter);
428 if (crc_stripping == OPTION_ENABLED)
429 adapter->flags2 |= FLAG2_CRC_STRIPPING;
430 }
431 }
407 { /* Kumeran Lock Loss Workaround */ 432 { /* Kumeran Lock Loss Workaround */
408 const struct e1000_option opt = { 433 const struct e1000_option opt = {
409 .type = enable_option, 434 .type = enable_option,
diff --git a/drivers/net/ehea/ehea.h b/drivers/net/ehea/ehea.h
index 5524271eedca..002d918fb4c7 100644
--- a/drivers/net/ehea/ehea.h
+++ b/drivers/net/ehea/ehea.h
@@ -40,7 +40,7 @@
40#include <asm/io.h> 40#include <asm/io.h>
41 41
42#define DRV_NAME "ehea" 42#define DRV_NAME "ehea"
43#define DRV_VERSION "EHEA_0093" 43#define DRV_VERSION "EHEA_0095"
44 44
45/* eHEA capability flags */ 45/* eHEA capability flags */
46#define DLPAR_PORT_ADD_REM 1 46#define DLPAR_PORT_ADD_REM 1
diff --git a/drivers/net/ehea/ehea_main.c b/drivers/net/ehea/ehea_main.c
index b70c5314f537..422fcb93e2c3 100644
--- a/drivers/net/ehea/ehea_main.c
+++ b/drivers/net/ehea/ehea_main.c
@@ -2863,7 +2863,7 @@ static void ehea_rereg_mrs(struct work_struct *work)
2863 struct ehea_adapter *adapter; 2863 struct ehea_adapter *adapter;
2864 2864
2865 mutex_lock(&dlpar_mem_lock); 2865 mutex_lock(&dlpar_mem_lock);
2866 ehea_info("LPAR memory enlarged - re-initializing driver"); 2866 ehea_info("LPAR memory changed - re-initializing driver");
2867 2867
2868 list_for_each_entry(adapter, &adapter_list, list) 2868 list_for_each_entry(adapter, &adapter_list, list)
2869 if (adapter->active_ports) { 2869 if (adapter->active_ports) {
@@ -2900,13 +2900,6 @@ static void ehea_rereg_mrs(struct work_struct *work)
2900 } 2900 }
2901 } 2901 }
2902 2902
2903 ehea_destroy_busmap();
2904 ret = ehea_create_busmap();
2905 if (ret) {
2906 ehea_error("creating ehea busmap failed");
2907 goto out;
2908 }
2909
2910 clear_bit(__EHEA_STOP_XFER, &ehea_driver_flags); 2903 clear_bit(__EHEA_STOP_XFER, &ehea_driver_flags);
2911 2904
2912 list_for_each_entry(adapter, &adapter_list, list) 2905 list_for_each_entry(adapter, &adapter_list, list)
@@ -3519,9 +3512,21 @@ void ehea_crash_handler(void)
3519static int ehea_mem_notifier(struct notifier_block *nb, 3512static int ehea_mem_notifier(struct notifier_block *nb,
3520 unsigned long action, void *data) 3513 unsigned long action, void *data)
3521{ 3514{
3515 struct memory_notify *arg = data;
3522 switch (action) { 3516 switch (action) {
3523 case MEM_OFFLINE: 3517 case MEM_CANCEL_OFFLINE:
3524 ehea_info("memory has been removed"); 3518 ehea_info("memory offlining canceled");
3519 /* Readd canceled memory block */
3520 case MEM_ONLINE:
3521 ehea_info("memory is going online");
3522 if (ehea_add_sect_bmap(arg->start_pfn, arg->nr_pages))
3523 return NOTIFY_BAD;
3524 ehea_rereg_mrs(NULL);
3525 break;
3526 case MEM_GOING_OFFLINE:
3527 ehea_info("memory is going offline");
3528 if (ehea_rem_sect_bmap(arg->start_pfn, arg->nr_pages))
3529 return NOTIFY_BAD;
3525 ehea_rereg_mrs(NULL); 3530 ehea_rereg_mrs(NULL);
3526 break; 3531 break;
3527 default: 3532 default:
diff --git a/drivers/net/ehea/ehea_qmr.c b/drivers/net/ehea/ehea_qmr.c
index db8a9257e680..9d006878f045 100644
--- a/drivers/net/ehea/ehea_qmr.c
+++ b/drivers/net/ehea/ehea_qmr.c
@@ -567,7 +567,7 @@ static inline int ehea_calc_index(unsigned long i, unsigned long s)
567static inline int ehea_init_top_bmap(struct ehea_top_bmap *ehea_top_bmap, 567static inline int ehea_init_top_bmap(struct ehea_top_bmap *ehea_top_bmap,
568 int dir) 568 int dir)
569{ 569{
570 if(!ehea_top_bmap->dir[dir]) { 570 if (!ehea_top_bmap->dir[dir]) {
571 ehea_top_bmap->dir[dir] = 571 ehea_top_bmap->dir[dir] =
572 kzalloc(sizeof(struct ehea_dir_bmap), GFP_KERNEL); 572 kzalloc(sizeof(struct ehea_dir_bmap), GFP_KERNEL);
573 if (!ehea_top_bmap->dir[dir]) 573 if (!ehea_top_bmap->dir[dir])
@@ -578,7 +578,7 @@ static inline int ehea_init_top_bmap(struct ehea_top_bmap *ehea_top_bmap,
578 578
579static inline int ehea_init_bmap(struct ehea_bmap *ehea_bmap, int top, int dir) 579static inline int ehea_init_bmap(struct ehea_bmap *ehea_bmap, int top, int dir)
580{ 580{
581 if(!ehea_bmap->top[top]) { 581 if (!ehea_bmap->top[top]) {
582 ehea_bmap->top[top] = 582 ehea_bmap->top[top] =
583 kzalloc(sizeof(struct ehea_top_bmap), GFP_KERNEL); 583 kzalloc(sizeof(struct ehea_top_bmap), GFP_KERNEL);
584 if (!ehea_bmap->top[top]) 584 if (!ehea_bmap->top[top])
@@ -587,53 +587,171 @@ static inline int ehea_init_bmap(struct ehea_bmap *ehea_bmap, int top, int dir)
587 return ehea_init_top_bmap(ehea_bmap->top[top], dir); 587 return ehea_init_top_bmap(ehea_bmap->top[top], dir);
588} 588}
589 589
590static int ehea_create_busmap_callback(unsigned long pfn, 590static DEFINE_MUTEX(ehea_busmap_mutex);
591 unsigned long nr_pages, void *arg) 591static unsigned long ehea_mr_len;
592
593#define EHEA_BUSMAP_ADD_SECT 1
594#define EHEA_BUSMAP_REM_SECT 0
595
596static void ehea_rebuild_busmap(void)
592{ 597{
593 unsigned long i, mr_len, start_section, end_section; 598 u64 vaddr = EHEA_BUSMAP_START;
594 start_section = (pfn * PAGE_SIZE) / EHEA_SECTSIZE; 599 int top, dir, idx;
595 end_section = start_section + ((nr_pages * PAGE_SIZE) / EHEA_SECTSIZE);
596 mr_len = *(unsigned long *)arg;
597 600
598 if (!ehea_bmap) 601 for (top = 0; top < EHEA_MAP_ENTRIES; top++) {
599 ehea_bmap = kzalloc(sizeof(struct ehea_bmap), GFP_KERNEL); 602 struct ehea_top_bmap *ehea_top;
600 if (!ehea_bmap) 603 int valid_dir_entries = 0;
601 return -ENOMEM;
602 604
603 for (i = start_section; i < end_section; i++) { 605 if (!ehea_bmap->top[top])
604 int ret; 606 continue;
605 int top, dir, idx; 607 ehea_top = ehea_bmap->top[top];
606 u64 vaddr; 608 for (dir = 0; dir < EHEA_MAP_ENTRIES; dir++) {
609 struct ehea_dir_bmap *ehea_dir;
610 int valid_entries = 0;
607 611
608 top = ehea_calc_index(i, EHEA_TOP_INDEX_SHIFT); 612 if (!ehea_top->dir[dir])
609 dir = ehea_calc_index(i, EHEA_DIR_INDEX_SHIFT); 613 continue;
614 valid_dir_entries++;
615 ehea_dir = ehea_top->dir[dir];
616 for (idx = 0; idx < EHEA_MAP_ENTRIES; idx++) {
617 if (!ehea_dir->ent[idx])
618 continue;
619 valid_entries++;
620 ehea_dir->ent[idx] = vaddr;
621 vaddr += EHEA_SECTSIZE;
622 }
623 if (!valid_entries) {
624 ehea_top->dir[dir] = NULL;
625 kfree(ehea_dir);
626 }
627 }
628 if (!valid_dir_entries) {
629 ehea_bmap->top[top] = NULL;
630 kfree(ehea_top);
631 }
632 }
633}
610 634
611 ret = ehea_init_bmap(ehea_bmap, top, dir); 635static int ehea_update_busmap(unsigned long pfn, unsigned long nr_pages, int add)
612 if(ret) 636{
613 return ret; 637 unsigned long i, start_section, end_section;
614 638
615 idx = i & EHEA_INDEX_MASK; 639 if (!nr_pages)
616 vaddr = EHEA_BUSMAP_START + mr_len + i * EHEA_SECTSIZE; 640 return 0;
617 641
618 ehea_bmap->top[top]->dir[dir]->ent[idx] = vaddr; 642 if (!ehea_bmap) {
643 ehea_bmap = kzalloc(sizeof(struct ehea_bmap), GFP_KERNEL);
644 if (!ehea_bmap)
645 return -ENOMEM;
619 } 646 }
620 647
621 mr_len += nr_pages * PAGE_SIZE; 648 start_section = (pfn * PAGE_SIZE) / EHEA_SECTSIZE;
622 *(unsigned long *)arg = mr_len; 649 end_section = start_section + ((nr_pages * PAGE_SIZE) / EHEA_SECTSIZE);
650 /* Mark entries as valid or invalid only; address is assigned later */
651 for (i = start_section; i < end_section; i++) {
652 u64 flag;
653 int top = ehea_calc_index(i, EHEA_TOP_INDEX_SHIFT);
654 int dir = ehea_calc_index(i, EHEA_DIR_INDEX_SHIFT);
655 int idx = i & EHEA_INDEX_MASK;
656
657 if (add) {
658 int ret = ehea_init_bmap(ehea_bmap, top, dir);
659 if (ret)
660 return ret;
661 flag = 1; /* valid */
662 ehea_mr_len += EHEA_SECTSIZE;
663 } else {
664 if (!ehea_bmap->top[top])
665 continue;
666 if (!ehea_bmap->top[top]->dir[dir])
667 continue;
668 flag = 0; /* invalid */
669 ehea_mr_len -= EHEA_SECTSIZE;
670 }
623 671
672 ehea_bmap->top[top]->dir[dir]->ent[idx] = flag;
673 }
674 ehea_rebuild_busmap(); /* Assign contiguous addresses for mr */
624 return 0; 675 return 0;
625} 676}
626 677
627static unsigned long ehea_mr_len; 678int ehea_add_sect_bmap(unsigned long pfn, unsigned long nr_pages)
679{
680 int ret;
628 681
629static DEFINE_MUTEX(ehea_busmap_mutex); 682 mutex_lock(&ehea_busmap_mutex);
683 ret = ehea_update_busmap(pfn, nr_pages, EHEA_BUSMAP_ADD_SECT);
684 mutex_unlock(&ehea_busmap_mutex);
685 return ret;
686}
687
688int ehea_rem_sect_bmap(unsigned long pfn, unsigned long nr_pages)
689{
690 int ret;
691
692 mutex_lock(&ehea_busmap_mutex);
693 ret = ehea_update_busmap(pfn, nr_pages, EHEA_BUSMAP_REM_SECT);
694 mutex_unlock(&ehea_busmap_mutex);
695 return ret;
696}
697
698static int ehea_is_hugepage(unsigned long pfn)
699{
700 int page_order;
701
702 if (pfn & EHEA_HUGEPAGE_PFN_MASK)
703 return 0;
704
705 page_order = compound_order(pfn_to_page(pfn));
706 if (page_order + PAGE_SHIFT != EHEA_HUGEPAGESHIFT)
707 return 0;
708
709 return 1;
710}
711
712static int ehea_create_busmap_callback(unsigned long initial_pfn,
713 unsigned long total_nr_pages, void *arg)
714{
715 int ret;
716 unsigned long pfn, start_pfn, end_pfn, nr_pages;
717
718 if ((total_nr_pages * PAGE_SIZE) < EHEA_HUGEPAGE_SIZE)
719 return ehea_update_busmap(initial_pfn, total_nr_pages,
720 EHEA_BUSMAP_ADD_SECT);
721
722 /* Given chunk is >= 16GB -> check for hugepages */
723 start_pfn = initial_pfn;
724 end_pfn = initial_pfn + total_nr_pages;
725 pfn = start_pfn;
726
727 while (pfn < end_pfn) {
728 if (ehea_is_hugepage(pfn)) {
729 /* Add mem found in front of the hugepage */
730 nr_pages = pfn - start_pfn;
731 ret = ehea_update_busmap(start_pfn, nr_pages,
732 EHEA_BUSMAP_ADD_SECT);
733 if (ret)
734 return ret;
735
736 /* Skip the hugepage */
737 pfn += (EHEA_HUGEPAGE_SIZE / PAGE_SIZE);
738 start_pfn = pfn;
739 } else
740 pfn += (EHEA_SECTSIZE / PAGE_SIZE);
741 }
742
743 /* Add mem found behind the hugepage(s) */
744 nr_pages = pfn - start_pfn;
745 return ehea_update_busmap(start_pfn, nr_pages, EHEA_BUSMAP_ADD_SECT);
746}
630 747
631int ehea_create_busmap(void) 748int ehea_create_busmap(void)
632{ 749{
633 int ret; 750 int ret;
751
634 mutex_lock(&ehea_busmap_mutex); 752 mutex_lock(&ehea_busmap_mutex);
635 ehea_mr_len = 0; 753 ehea_mr_len = 0;
636 ret = walk_memory_resource(0, 1ULL << MAX_PHYSMEM_BITS, &ehea_mr_len, 754 ret = walk_memory_resource(0, 1ULL << MAX_PHYSMEM_BITS, NULL,
637 ehea_create_busmap_callback); 755 ehea_create_busmap_callback);
638 mutex_unlock(&ehea_busmap_mutex); 756 mutex_unlock(&ehea_busmap_mutex);
639 return ret; 757 return ret;
diff --git a/drivers/net/ehea/ehea_qmr.h b/drivers/net/ehea/ehea_qmr.h
index 0bb6f92fa2f8..0817c1e74a19 100644
--- a/drivers/net/ehea/ehea_qmr.h
+++ b/drivers/net/ehea/ehea_qmr.h
@@ -40,6 +40,9 @@
40#define EHEA_PAGESIZE (1UL << EHEA_PAGESHIFT) 40#define EHEA_PAGESIZE (1UL << EHEA_PAGESHIFT)
41#define EHEA_SECTSIZE (1UL << 24) 41#define EHEA_SECTSIZE (1UL << 24)
42#define EHEA_PAGES_PER_SECTION (EHEA_SECTSIZE >> EHEA_PAGESHIFT) 42#define EHEA_PAGES_PER_SECTION (EHEA_SECTSIZE >> EHEA_PAGESHIFT)
43#define EHEA_HUGEPAGESHIFT 34
44#define EHEA_HUGEPAGE_SIZE (1UL << EHEA_HUGEPAGESHIFT)
45#define EHEA_HUGEPAGE_PFN_MASK ((EHEA_HUGEPAGE_SIZE - 1) >> PAGE_SHIFT)
43 46
44#if ((1UL << SECTION_SIZE_BITS) < EHEA_SECTSIZE) 47#if ((1UL << SECTION_SIZE_BITS) < EHEA_SECTSIZE)
45#error eHEA module cannot work if kernel sectionsize < ehea sectionsize 48#error eHEA module cannot work if kernel sectionsize < ehea sectionsize
@@ -378,6 +381,8 @@ int ehea_rem_mr(struct ehea_mr *mr);
378 381
379void ehea_error_data(struct ehea_adapter *adapter, u64 res_handle); 382void ehea_error_data(struct ehea_adapter *adapter, u64 res_handle);
380 383
384int ehea_add_sect_bmap(unsigned long pfn, unsigned long nr_pages);
385int ehea_rem_sect_bmap(unsigned long pfn, unsigned long nr_pages);
381int ehea_create_busmap(void); 386int ehea_create_busmap(void);
382void ehea_destroy_busmap(void); 387void ehea_destroy_busmap(void);
383u64 ehea_map_vaddr(void *caddr); 388u64 ehea_map_vaddr(void *caddr);
diff --git a/drivers/net/enc28j60.c b/drivers/net/enc28j60.c
index e1b441effbbe..36cb6e95b465 100644
--- a/drivers/net/enc28j60.c
+++ b/drivers/net/enc28j60.c
@@ -568,6 +568,17 @@ static u16 erxrdpt_workaround(u16 next_packet_ptr, u16 start, u16 end)
568 return erxrdpt; 568 return erxrdpt;
569} 569}
570 570
571/*
572 * Calculate wrap around when reading beyond the end of the RX buffer
573 */
574static u16 rx_packet_start(u16 ptr)
575{
576 if (ptr + RSV_SIZE > RXEND_INIT)
577 return (ptr + RSV_SIZE) - (RXEND_INIT - RXSTART_INIT + 1);
578 else
579 return ptr + RSV_SIZE;
580}
581
571static void nolock_rxfifo_init(struct enc28j60_net *priv, u16 start, u16 end) 582static void nolock_rxfifo_init(struct enc28j60_net *priv, u16 start, u16 end)
572{ 583{
573 u16 erxrdpt; 584 u16 erxrdpt;
@@ -938,8 +949,9 @@ static void enc28j60_hw_rx(struct net_device *ndev)
938 skb->dev = ndev; 949 skb->dev = ndev;
939 skb_reserve(skb, NET_IP_ALIGN); 950 skb_reserve(skb, NET_IP_ALIGN);
940 /* copy the packet from the receive buffer */ 951 /* copy the packet from the receive buffer */
941 enc28j60_mem_read(priv, priv->next_pk_ptr + sizeof(rsv), 952 enc28j60_mem_read(priv,
942 len, skb_put(skb, len)); 953 rx_packet_start(priv->next_pk_ptr),
954 len, skb_put(skb, len));
943 if (netif_msg_pktdata(priv)) 955 if (netif_msg_pktdata(priv))
944 dump_packet(__func__, skb->len, skb->data); 956 dump_packet(__func__, skb->len, skb->data);
945 skb->protocol = eth_type_trans(skb, ndev); 957 skb->protocol = eth_type_trans(skb, ndev);
@@ -947,7 +959,7 @@ static void enc28j60_hw_rx(struct net_device *ndev)
947 ndev->stats.rx_packets++; 959 ndev->stats.rx_packets++;
948 ndev->stats.rx_bytes += len; 960 ndev->stats.rx_bytes += len;
949 ndev->last_rx = jiffies; 961 ndev->last_rx = jiffies;
950 netif_rx(skb); 962 netif_rx_ni(skb);
951 } 963 }
952 } 964 }
953 /* 965 /*
diff --git a/drivers/net/fec_mpc52xx.c b/drivers/net/fec_mpc52xx.c
index 4e4f68304e82..aec3b97e794d 100644
--- a/drivers/net/fec_mpc52xx.c
+++ b/drivers/net/fec_mpc52xx.c
@@ -401,6 +401,21 @@ static int mpc52xx_fec_hard_start_xmit(struct sk_buff *skb, struct net_device *d
401 return 0; 401 return 0;
402} 402}
403 403
404#ifdef CONFIG_NET_POLL_CONTROLLER
405static void mpc52xx_fec_poll_controller(struct net_device *dev)
406{
407 struct mpc52xx_fec_priv *priv = netdev_priv(dev);
408
409 disable_irq(priv->t_irq);
410 mpc52xx_fec_tx_interrupt(priv->t_irq, dev);
411 enable_irq(priv->t_irq);
412 disable_irq(priv->r_irq);
413 mpc52xx_fec_rx_interrupt(priv->r_irq, dev);
414 enable_irq(priv->r_irq);
415}
416#endif
417
418
404/* This handles BestComm transmit task interrupts 419/* This handles BestComm transmit task interrupts
405 */ 420 */
406static irqreturn_t mpc52xx_fec_tx_interrupt(int irq, void *dev_id) 421static irqreturn_t mpc52xx_fec_tx_interrupt(int irq, void *dev_id)
@@ -926,6 +941,9 @@ mpc52xx_fec_probe(struct of_device *op, const struct of_device_id *match)
926 ndev->tx_timeout = mpc52xx_fec_tx_timeout; 941 ndev->tx_timeout = mpc52xx_fec_tx_timeout;
927 ndev->watchdog_timeo = FEC_WATCHDOG_TIMEOUT; 942 ndev->watchdog_timeo = FEC_WATCHDOG_TIMEOUT;
928 ndev->base_addr = mem.start; 943 ndev->base_addr = mem.start;
944#ifdef CONFIG_NET_POLL_CONTROLLER
945 ndev->poll_controller = mpc52xx_fec_poll_controller;
946#endif
929 947
930 priv->t_irq = priv->r_irq = ndev->irq = NO_IRQ; /* IRQ are free for now */ 948 priv->t_irq = priv->r_irq = ndev->irq = NO_IRQ; /* IRQ are free for now */
931 949
diff --git a/drivers/net/fec_mpc52xx_phy.c b/drivers/net/fec_mpc52xx_phy.c
index 08e18bcb970f..45dd9bdc5d62 100644
--- a/drivers/net/fec_mpc52xx_phy.c
+++ b/drivers/net/fec_mpc52xx_phy.c
@@ -2,6 +2,7 @@
2 * Driver for the MPC5200 Fast Ethernet Controller - MDIO bus driver 2 * Driver for the MPC5200 Fast Ethernet Controller - MDIO bus driver
3 * 3 *
4 * Copyright (C) 2007 Domen Puncer, Telargo, Inc. 4 * Copyright (C) 2007 Domen Puncer, Telargo, Inc.
5 * Copyright (C) 2008 Wolfram Sang, Pengutronix
5 * 6 *
6 * This file is licensed under the terms of the GNU General Public License 7 * This file is licensed under the terms of the GNU General Public License
7 * version 2. This program is licensed "as is" without any warranty of any 8 * version 2. This program is licensed "as is" without any warranty of any
@@ -21,58 +22,45 @@ struct mpc52xx_fec_mdio_priv {
21 struct mpc52xx_fec __iomem *regs; 22 struct mpc52xx_fec __iomem *regs;
22}; 23};
23 24
24static int mpc52xx_fec_mdio_read(struct mii_bus *bus, int phy_id, int reg) 25static int mpc52xx_fec_mdio_transfer(struct mii_bus *bus, int phy_id,
26 int reg, u32 value)
25{ 27{
26 struct mpc52xx_fec_mdio_priv *priv = bus->priv; 28 struct mpc52xx_fec_mdio_priv *priv = bus->priv;
27 struct mpc52xx_fec __iomem *fec; 29 struct mpc52xx_fec __iomem *fec;
28 int tries = 100; 30 int tries = 100;
29 u32 request = FEC_MII_READ_FRAME; 31
32 value |= (phy_id << FEC_MII_DATA_PA_SHIFT) & FEC_MII_DATA_PA_MSK;
33 value |= (reg << FEC_MII_DATA_RA_SHIFT) & FEC_MII_DATA_RA_MSK;
30 34
31 fec = priv->regs; 35 fec = priv->regs;
32 out_be32(&fec->ievent, FEC_IEVENT_MII); 36 out_be32(&fec->ievent, FEC_IEVENT_MII);
33 37 out_be32(&priv->regs->mii_data, value);
34 request |= (phy_id << FEC_MII_DATA_PA_SHIFT) & FEC_MII_DATA_PA_MSK;
35 request |= (reg << FEC_MII_DATA_RA_SHIFT) & FEC_MII_DATA_RA_MSK;
36
37 out_be32(&priv->regs->mii_data, request);
38 38
39 /* wait for it to finish, this takes about 23 us on lite5200b */ 39 /* wait for it to finish, this takes about 23 us on lite5200b */
40 while (!(in_be32(&fec->ievent) & FEC_IEVENT_MII) && --tries) 40 while (!(in_be32(&fec->ievent) & FEC_IEVENT_MII) && --tries)
41 udelay(5); 41 udelay(5);
42 42
43 if (tries == 0) 43 if (!tries)
44 return -ETIMEDOUT; 44 return -ETIMEDOUT;
45 45
46 return in_be32(&priv->regs->mii_data) & FEC_MII_DATA_DATAMSK; 46 return value & FEC_MII_DATA_OP_RD ?
47 in_be32(&priv->regs->mii_data) & FEC_MII_DATA_DATAMSK : 0;
47} 48}
48 49
49static int mpc52xx_fec_mdio_write(struct mii_bus *bus, int phy_id, int reg, u16 data) 50static int mpc52xx_fec_mdio_read(struct mii_bus *bus, int phy_id, int reg)
50{ 51{
51 struct mpc52xx_fec_mdio_priv *priv = bus->priv; 52 return mpc52xx_fec_mdio_transfer(bus, phy_id, reg, FEC_MII_READ_FRAME);
52 struct mpc52xx_fec __iomem *fec; 53}
53 u32 value = data;
54 int tries = 100;
55
56 fec = priv->regs;
57 out_be32(&fec->ievent, FEC_IEVENT_MII);
58
59 value |= FEC_MII_WRITE_FRAME;
60 value |= (phy_id << FEC_MII_DATA_PA_SHIFT) & FEC_MII_DATA_PA_MSK;
61 value |= (reg << FEC_MII_DATA_RA_SHIFT) & FEC_MII_DATA_RA_MSK;
62
63 out_be32(&priv->regs->mii_data, value);
64
65 /* wait for request to finish */
66 while (!(in_be32(&fec->ievent) & FEC_IEVENT_MII) && --tries)
67 udelay(5);
68
69 if (tries == 0)
70 return -ETIMEDOUT;
71 54
72 return 0; 55static int mpc52xx_fec_mdio_write(struct mii_bus *bus, int phy_id, int reg,
56 u16 data)
57{
58 return mpc52xx_fec_mdio_transfer(bus, phy_id, reg,
59 data | FEC_MII_WRITE_FRAME);
73} 60}
74 61
75static int mpc52xx_fec_mdio_probe(struct of_device *of, const struct of_device_id *match) 62static int mpc52xx_fec_mdio_probe(struct of_device *of,
63 const struct of_device_id *match)
76{ 64{
77 struct device *dev = &of->dev; 65 struct device *dev = &of->dev;
78 struct device_node *np = of->node; 66 struct device_node *np = of->node;
@@ -131,7 +119,8 @@ static int mpc52xx_fec_mdio_probe(struct of_device *of, const struct of_device_i
131 dev_set_drvdata(dev, bus); 119 dev_set_drvdata(dev, bus);
132 120
133 /* set MII speed */ 121 /* set MII speed */
134 out_be32(&priv->regs->mii_speed, ((mpc52xx_find_ipb_freq(of->node) >> 20) / 5) << 1); 122 out_be32(&priv->regs->mii_speed,
123 ((mpc52xx_find_ipb_freq(of->node) >> 20) / 5) << 1);
135 124
136 /* enable MII interrupt */ 125 /* enable MII interrupt */
137 out_be32(&priv->regs->imask, in_be32(&priv->regs->imask) | FEC_IMASK_MII); 126 out_be32(&priv->regs->imask, in_be32(&priv->regs->imask) | FEC_IMASK_MII);
diff --git a/drivers/net/fs_enet/fs_enet-main.c b/drivers/net/fs_enet/fs_enet-main.c
index cb51c1fb0338..a6f49d025787 100644
--- a/drivers/net/fs_enet/fs_enet-main.c
+++ b/drivers/net/fs_enet/fs_enet-main.c
@@ -1099,7 +1099,9 @@ static int __devinit fs_enet_probe(struct of_device *ofdev,
1099 ndev->stop = fs_enet_close; 1099 ndev->stop = fs_enet_close;
1100 ndev->get_stats = fs_enet_get_stats; 1100 ndev->get_stats = fs_enet_get_stats;
1101 ndev->set_multicast_list = fs_set_multicast_list; 1101 ndev->set_multicast_list = fs_set_multicast_list;
1102 1102#ifdef CONFIG_NET_POLL_CONTROLLER
1103 ndev->poll_controller = fs_enet_netpoll;
1104#endif
1103 if (fpi->use_napi) 1105 if (fpi->use_napi)
1104 netif_napi_add(ndev, &fep->napi, fs_enet_rx_napi, 1106 netif_napi_add(ndev, &fep->napi, fs_enet_rx_napi,
1105 fpi->napi_weight); 1107 fpi->napi_weight);
@@ -1209,7 +1211,7 @@ static void __exit fs_cleanup(void)
1209static void fs_enet_netpoll(struct net_device *dev) 1211static void fs_enet_netpoll(struct net_device *dev)
1210{ 1212{
1211 disable_irq(dev->irq); 1213 disable_irq(dev->irq);
1212 fs_enet_interrupt(dev->irq, dev, NULL); 1214 fs_enet_interrupt(dev->irq, dev);
1213 enable_irq(dev->irq); 1215 enable_irq(dev->irq);
1214} 1216}
1215#endif 1217#endif
diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c
index b5bb7ae2817f..c4af949bf860 100644
--- a/drivers/net/gianfar.c
+++ b/drivers/net/gianfar.c
@@ -161,7 +161,7 @@ static int gfar_probe(struct platform_device *pdev)
161 struct gfar_private *priv = NULL; 161 struct gfar_private *priv = NULL;
162 struct gianfar_platform_data *einfo; 162 struct gianfar_platform_data *einfo;
163 struct resource *r; 163 struct resource *r;
164 int err = 0; 164 int err = 0, irq;
165 DECLARE_MAC_BUF(mac); 165 DECLARE_MAC_BUF(mac);
166 166
167 einfo = (struct gianfar_platform_data *) pdev->dev.platform_data; 167 einfo = (struct gianfar_platform_data *) pdev->dev.platform_data;
@@ -187,15 +187,25 @@ static int gfar_probe(struct platform_device *pdev)
187 187
188 /* fill out IRQ fields */ 188 /* fill out IRQ fields */
189 if (einfo->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) { 189 if (einfo->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
190 priv->interruptTransmit = platform_get_irq_byname(pdev, "tx"); 190 irq = platform_get_irq_byname(pdev, "tx");
191 priv->interruptReceive = platform_get_irq_byname(pdev, "rx"); 191 if (irq < 0)
192 priv->interruptError = platform_get_irq_byname(pdev, "error"); 192 goto regs_fail;
193 if (priv->interruptTransmit < 0 || priv->interruptReceive < 0 || priv->interruptError < 0) 193 priv->interruptTransmit = irq;
194
195 irq = platform_get_irq_byname(pdev, "rx");
196 if (irq < 0)
197 goto regs_fail;
198 priv->interruptReceive = irq;
199
200 irq = platform_get_irq_byname(pdev, "error");
201 if (irq < 0)
194 goto regs_fail; 202 goto regs_fail;
203 priv->interruptError = irq;
195 } else { 204 } else {
196 priv->interruptTransmit = platform_get_irq(pdev, 0); 205 irq = platform_get_irq(pdev, 0);
197 if (priv->interruptTransmit < 0) 206 if (irq < 0)
198 goto regs_fail; 207 goto regs_fail;
208 priv->interruptTransmit = irq;
199 } 209 }
200 210
201 /* get a pointer to the register memory */ 211 /* get a pointer to the register memory */
@@ -576,6 +586,18 @@ static void gfar_configure_serdes(struct net_device *dev)
576 struct gfar_mii __iomem *regs = 586 struct gfar_mii __iomem *regs =
577 (void __iomem *)&priv->regs->gfar_mii_regs; 587 (void __iomem *)&priv->regs->gfar_mii_regs;
578 int tbipa = gfar_read(&priv->regs->tbipa); 588 int tbipa = gfar_read(&priv->regs->tbipa);
589 struct mii_bus *bus = gfar_get_miibus(priv);
590
591 if (bus)
592 mutex_lock(&bus->mdio_lock);
593
594 /* If the link is already up, we must already be ok, and don't need to
595 * configure and reset the TBI<->SerDes link. Maybe U-Boot configured
596 * everything for us? Resetting it takes the link down and requires
597 * several seconds for it to come back.
598 */
599 if (gfar_local_mdio_read(regs, tbipa, MII_BMSR) & BMSR_LSTATUS)
600 goto done;
579 601
580 /* Single clk mode, mii mode off(for serdes communication) */ 602 /* Single clk mode, mii mode off(for serdes communication) */
581 gfar_local_mdio_write(regs, tbipa, MII_TBICON, TBICON_CLK_SELECT); 603 gfar_local_mdio_write(regs, tbipa, MII_TBICON, TBICON_CLK_SELECT);
@@ -586,6 +608,10 @@ static void gfar_configure_serdes(struct net_device *dev)
586 608
587 gfar_local_mdio_write(regs, tbipa, MII_BMCR, BMCR_ANENABLE | 609 gfar_local_mdio_write(regs, tbipa, MII_BMCR, BMCR_ANENABLE |
588 BMCR_ANRESTART | BMCR_FULLDPLX | BMCR_SPEED1000); 610 BMCR_ANRESTART | BMCR_FULLDPLX | BMCR_SPEED1000);
611
612 done:
613 if (bus)
614 mutex_unlock(&bus->mdio_lock);
589} 615}
590 616
591static void init_registers(struct net_device *dev) 617static void init_registers(struct net_device *dev)
@@ -1381,6 +1407,10 @@ static int gfar_clean_tx_ring(struct net_device *dev)
1381 if (bdp->status & TXBD_DEF) 1407 if (bdp->status & TXBD_DEF)
1382 dev->stats.collisions++; 1408 dev->stats.collisions++;
1383 1409
1410 /* Unmap the DMA memory */
1411 dma_unmap_single(&priv->dev->dev, bdp->bufPtr,
1412 bdp->length, DMA_TO_DEVICE);
1413
1384 /* Free the sk buffer associated with this TxBD */ 1414 /* Free the sk buffer associated with this TxBD */
1385 dev_kfree_skb_irq(priv->tx_skbuff[priv->skb_dirtytx]); 1415 dev_kfree_skb_irq(priv->tx_skbuff[priv->skb_dirtytx]);
1386 1416
@@ -1640,6 +1670,9 @@ int gfar_clean_rx_ring(struct net_device *dev, int rx_work_limit)
1640 1670
1641 skb = priv->rx_skbuff[priv->skb_currx]; 1671 skb = priv->rx_skbuff[priv->skb_currx];
1642 1672
1673 dma_unmap_single(&priv->dev->dev, bdp->bufPtr,
1674 priv->rx_buffer_size, DMA_FROM_DEVICE);
1675
1643 /* We drop the frame if we failed to allocate a new buffer */ 1676 /* We drop the frame if we failed to allocate a new buffer */
1644 if (unlikely(!newskb || !(bdp->status & RXBD_LAST) || 1677 if (unlikely(!newskb || !(bdp->status & RXBD_LAST) ||
1645 bdp->status & RXBD_ERR)) { 1678 bdp->status & RXBD_ERR)) {
@@ -1648,14 +1681,8 @@ int gfar_clean_rx_ring(struct net_device *dev, int rx_work_limit)
1648 if (unlikely(!newskb)) 1681 if (unlikely(!newskb))
1649 newskb = skb; 1682 newskb = skb;
1650 1683
1651 if (skb) { 1684 if (skb)
1652 dma_unmap_single(&priv->dev->dev,
1653 bdp->bufPtr,
1654 priv->rx_buffer_size,
1655 DMA_FROM_DEVICE);
1656
1657 dev_kfree_skb_any(skb); 1685 dev_kfree_skb_any(skb);
1658 }
1659 } else { 1686 } else {
1660 /* Increment the number of packets */ 1687 /* Increment the number of packets */
1661 dev->stats.rx_packets++; 1688 dev->stats.rx_packets++;
diff --git a/drivers/net/gianfar_mii.c b/drivers/net/gianfar_mii.c
index bf73eea98010..0e2595d24933 100644
--- a/drivers/net/gianfar_mii.c
+++ b/drivers/net/gianfar_mii.c
@@ -269,6 +269,27 @@ static struct device_driver gianfar_mdio_driver = {
269 .remove = gfar_mdio_remove, 269 .remove = gfar_mdio_remove,
270}; 270};
271 271
272static int match_mdio_bus(struct device *dev, void *data)
273{
274 const struct gfar_private *priv = data;
275 const struct platform_device *pdev = to_platform_device(dev);
276
277 return !strcmp(pdev->name, gianfar_mdio_driver.name) &&
278 pdev->id == priv->einfo->mdio_bus;
279}
280
281/* Given a gfar_priv structure, find the mii_bus controlled by this device (not
282 * necessarily the same as the bus the gfar's PHY is on), if one exists.
283 * Normally only the first gianfar controls a mii_bus. */
284struct mii_bus *gfar_get_miibus(const struct gfar_private *priv)
285{
286 /*const*/ struct device *d;
287
288 d = bus_find_device(gianfar_mdio_driver.bus, NULL, (void *)priv,
289 match_mdio_bus);
290 return d ? dev_get_drvdata(d) : NULL;
291}
292
272int __init gfar_mdio_init(void) 293int __init gfar_mdio_init(void)
273{ 294{
274 return driver_register(&gianfar_mdio_driver); 295 return driver_register(&gianfar_mdio_driver);
diff --git a/drivers/net/gianfar_mii.h b/drivers/net/gianfar_mii.h
index 2af28b16a0e2..02dc970ca1ff 100644
--- a/drivers/net/gianfar_mii.h
+++ b/drivers/net/gianfar_mii.h
@@ -18,6 +18,8 @@
18#ifndef __GIANFAR_MII_H 18#ifndef __GIANFAR_MII_H
19#define __GIANFAR_MII_H 19#define __GIANFAR_MII_H
20 20
21struct gfar_private; /* forward ref */
22
21#define MIIMIND_BUSY 0x00000001 23#define MIIMIND_BUSY 0x00000001
22#define MIIMIND_NOTVALID 0x00000004 24#define MIIMIND_NOTVALID 0x00000004
23 25
@@ -44,6 +46,7 @@ int gfar_mdio_write(struct mii_bus *bus, int mii_id, int regnum, u16 value);
44int gfar_local_mdio_write(struct gfar_mii __iomem *regs, int mii_id, 46int gfar_local_mdio_write(struct gfar_mii __iomem *regs, int mii_id,
45 int regnum, u16 value); 47 int regnum, u16 value);
46int gfar_local_mdio_read(struct gfar_mii __iomem *regs, int mii_id, int regnum); 48int gfar_local_mdio_read(struct gfar_mii __iomem *regs, int mii_id, int regnum);
49struct mii_bus *gfar_get_miibus(const struct gfar_private *priv);
47int __init gfar_mdio_init(void); 50int __init gfar_mdio_init(void);
48void gfar_mdio_exit(void); 51void gfar_mdio_exit(void);
49#endif /* GIANFAR_PHY_H */ 52#endif /* GIANFAR_PHY_H */
diff --git a/drivers/net/hp-plus.c b/drivers/net/hp-plus.c
index fbbd3e660c27..c01e290d09d2 100644
--- a/drivers/net/hp-plus.c
+++ b/drivers/net/hp-plus.c
@@ -230,7 +230,7 @@ static int __init hpp_probe1(struct net_device *dev, int ioaddr)
230 dev->open = &hpp_open; 230 dev->open = &hpp_open;
231 dev->stop = &hpp_close; 231 dev->stop = &hpp_close;
232#ifdef CONFIG_NET_POLL_CONTROLLER 232#ifdef CONFIG_NET_POLL_CONTROLLER
233 dev->poll_controller = ei_poll; 233 dev->poll_controller = eip_poll;
234#endif 234#endif
235 235
236 ei_status.name = name; 236 ei_status.name = name;
diff --git a/drivers/net/ibm_newemac/core.c b/drivers/net/ibm_newemac/core.c
index efcf21c9f5c7..901212aa37cb 100644
--- a/drivers/net/ibm_newemac/core.c
+++ b/drivers/net/ibm_newemac/core.c
@@ -2604,8 +2604,16 @@ static int __devinit emac_init_config(struct emac_instance *dev)
2604 if (of_device_is_compatible(np, "ibm,emac-440ep") || 2604 if (of_device_is_compatible(np, "ibm,emac-440ep") ||
2605 of_device_is_compatible(np, "ibm,emac-440gr")) 2605 of_device_is_compatible(np, "ibm,emac-440gr"))
2606 dev->features |= EMAC_FTR_440EP_PHY_CLK_FIX; 2606 dev->features |= EMAC_FTR_440EP_PHY_CLK_FIX;
2607 if (of_device_is_compatible(np, "ibm,emac-405ez")) 2607 if (of_device_is_compatible(np, "ibm,emac-405ez")) {
2608#ifdef CONFIG_IBM_NEW_EMAC_NO_FLOW_CTRL
2608 dev->features |= EMAC_FTR_NO_FLOW_CONTROL_40x; 2609 dev->features |= EMAC_FTR_NO_FLOW_CONTROL_40x;
2610#else
2611 printk(KERN_ERR "%s: Flow control not disabled!\n",
2612 np->full_name);
2613 return -ENXIO;
2614#endif
2615 }
2616
2609 } 2617 }
2610 2618
2611 /* Fixup some feature bits based on the device tree */ 2619 /* Fixup some feature bits based on the device tree */
diff --git a/drivers/net/ibm_newemac/mal.c b/drivers/net/ibm_newemac/mal.c
index 1839d3f154a3..ecf9798987fa 100644
--- a/drivers/net/ibm_newemac/mal.c
+++ b/drivers/net/ibm_newemac/mal.c
@@ -280,9 +280,11 @@ static irqreturn_t mal_txeob(int irq, void *dev_instance)
280 mal_schedule_poll(mal); 280 mal_schedule_poll(mal);
281 set_mal_dcrn(mal, MAL_TXEOBISR, r); 281 set_mal_dcrn(mal, MAL_TXEOBISR, r);
282 282
283#ifdef CONFIG_PPC_DCR_NATIVE
283 if (mal_has_feature(mal, MAL_FTR_CLEAR_ICINTSTAT)) 284 if (mal_has_feature(mal, MAL_FTR_CLEAR_ICINTSTAT))
284 mtdcri(SDR0, DCRN_SDR_ICINTSTAT, 285 mtdcri(SDR0, DCRN_SDR_ICINTSTAT,
285 (mfdcri(SDR0, DCRN_SDR_ICINTSTAT) | ICINTSTAT_ICTX)); 286 (mfdcri(SDR0, DCRN_SDR_ICINTSTAT) | ICINTSTAT_ICTX));
287#endif
286 288
287 return IRQ_HANDLED; 289 return IRQ_HANDLED;
288} 290}
@@ -298,9 +300,11 @@ static irqreturn_t mal_rxeob(int irq, void *dev_instance)
298 mal_schedule_poll(mal); 300 mal_schedule_poll(mal);
299 set_mal_dcrn(mal, MAL_RXEOBISR, r); 301 set_mal_dcrn(mal, MAL_RXEOBISR, r);
300 302
303#ifdef CONFIG_PPC_DCR_NATIVE
301 if (mal_has_feature(mal, MAL_FTR_CLEAR_ICINTSTAT)) 304 if (mal_has_feature(mal, MAL_FTR_CLEAR_ICINTSTAT))
302 mtdcri(SDR0, DCRN_SDR_ICINTSTAT, 305 mtdcri(SDR0, DCRN_SDR_ICINTSTAT,
303 (mfdcri(SDR0, DCRN_SDR_ICINTSTAT) | ICINTSTAT_ICRX)); 306 (mfdcri(SDR0, DCRN_SDR_ICINTSTAT) | ICINTSTAT_ICRX));
307#endif
304 308
305 return IRQ_HANDLED; 309 return IRQ_HANDLED;
306} 310}
@@ -572,9 +576,18 @@ static int __devinit mal_probe(struct of_device *ofdev,
572 goto fail; 576 goto fail;
573 } 577 }
574 578
575 if (of_device_is_compatible(ofdev->node, "ibm,mcmal-405ez")) 579 if (of_device_is_compatible(ofdev->node, "ibm,mcmal-405ez")) {
580#if defined(CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT) && \
581 defined(CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR)
576 mal->features |= (MAL_FTR_CLEAR_ICINTSTAT | 582 mal->features |= (MAL_FTR_CLEAR_ICINTSTAT |
577 MAL_FTR_COMMON_ERR_INT); 583 MAL_FTR_COMMON_ERR_INT);
584#else
585 printk(KERN_ERR "%s: Support for 405EZ not enabled!\n",
586 ofdev->node->full_name);
587 err = -ENODEV;
588 goto fail;
589#endif
590 }
578 591
579 mal->txeob_irq = irq_of_parse_and_map(ofdev->node, 0); 592 mal->txeob_irq = irq_of_parse_and_map(ofdev->node, 0);
580 mal->rxeob_irq = irq_of_parse_and_map(ofdev->node, 1); 593 mal->rxeob_irq = irq_of_parse_and_map(ofdev->node, 1);
diff --git a/drivers/net/igb/igb_ethtool.c b/drivers/net/igb/igb_ethtool.c
index 58906c984be9..89964fa739a0 100644
--- a/drivers/net/igb/igb_ethtool.c
+++ b/drivers/net/igb/igb_ethtool.c
@@ -1776,7 +1776,8 @@ static void igb_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
1776 1776
1777 /* this function will set ->supported = 0 and return 1 if wol is not 1777 /* this function will set ->supported = 0 and return 1 if wol is not
1778 * supported by this hardware */ 1778 * supported by this hardware */
1779 if (igb_wol_exclusion(adapter, wol)) 1779 if (igb_wol_exclusion(adapter, wol) ||
1780 !device_can_wakeup(&adapter->pdev->dev))
1780 return; 1781 return;
1781 1782
1782 /* apply any specific unsupported masks here */ 1783 /* apply any specific unsupported masks here */
@@ -1805,7 +1806,8 @@ static int igb_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
1805 if (wol->wolopts & (WAKE_PHY | WAKE_ARP | WAKE_MAGICSECURE)) 1806 if (wol->wolopts & (WAKE_PHY | WAKE_ARP | WAKE_MAGICSECURE))
1806 return -EOPNOTSUPP; 1807 return -EOPNOTSUPP;
1807 1808
1808 if (igb_wol_exclusion(adapter, wol)) 1809 if (igb_wol_exclusion(adapter, wol) ||
1810 !device_can_wakeup(&adapter->pdev->dev))
1809 return wol->wolopts ? -EOPNOTSUPP : 0; 1811 return wol->wolopts ? -EOPNOTSUPP : 0;
1810 1812
1811 switch (hw->device_id) { 1813 switch (hw->device_id) {
@@ -1825,6 +1827,8 @@ static int igb_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
1825 if (wol->wolopts & WAKE_MAGIC) 1827 if (wol->wolopts & WAKE_MAGIC)
1826 adapter->wol |= E1000_WUFC_MAG; 1828 adapter->wol |= E1000_WUFC_MAG;
1827 1829
1830 device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
1831
1828 return 0; 1832 return 0;
1829} 1833}
1830 1834
diff --git a/drivers/net/igb/igb_main.c b/drivers/net/igb/igb_main.c
index 93d02efa9a0a..20d27e622ec1 100644
--- a/drivers/net/igb/igb_main.c
+++ b/drivers/net/igb/igb_main.c
@@ -38,10 +38,11 @@
38#include <linux/ethtool.h> 38#include <linux/ethtool.h>
39#include <linux/if_vlan.h> 39#include <linux/if_vlan.h>
40#include <linux/pci.h> 40#include <linux/pci.h>
41#include <linux/pci-aspm.h>
41#include <linux/delay.h> 42#include <linux/delay.h>
42#include <linux/interrupt.h> 43#include <linux/interrupt.h>
43#include <linux/if_ether.h> 44#include <linux/if_ether.h>
44#ifdef CONFIG_DCA 45#ifdef CONFIG_IGB_DCA
45#include <linux/dca.h> 46#include <linux/dca.h>
46#endif 47#endif
47#include "igb.h" 48#include "igb.h"
@@ -106,11 +107,11 @@ static irqreturn_t igb_msix_other(int irq, void *);
106static irqreturn_t igb_msix_rx(int irq, void *); 107static irqreturn_t igb_msix_rx(int irq, void *);
107static irqreturn_t igb_msix_tx(int irq, void *); 108static irqreturn_t igb_msix_tx(int irq, void *);
108static int igb_clean_rx_ring_msix(struct napi_struct *, int); 109static int igb_clean_rx_ring_msix(struct napi_struct *, int);
109#ifdef CONFIG_DCA 110#ifdef CONFIG_IGB_DCA
110static void igb_update_rx_dca(struct igb_ring *); 111static void igb_update_rx_dca(struct igb_ring *);
111static void igb_update_tx_dca(struct igb_ring *); 112static void igb_update_tx_dca(struct igb_ring *);
112static void igb_setup_dca(struct igb_adapter *); 113static void igb_setup_dca(struct igb_adapter *);
113#endif /* CONFIG_DCA */ 114#endif /* CONFIG_IGB_DCA */
114static bool igb_clean_tx_irq(struct igb_ring *); 115static bool igb_clean_tx_irq(struct igb_ring *);
115static int igb_poll(struct napi_struct *, int); 116static int igb_poll(struct napi_struct *, int);
116static bool igb_clean_rx_irq_adv(struct igb_ring *, int *, int); 117static bool igb_clean_rx_irq_adv(struct igb_ring *, int *, int);
@@ -131,7 +132,7 @@ static int igb_suspend(struct pci_dev *, pm_message_t);
131static int igb_resume(struct pci_dev *); 132static int igb_resume(struct pci_dev *);
132#endif 133#endif
133static void igb_shutdown(struct pci_dev *); 134static void igb_shutdown(struct pci_dev *);
134#ifdef CONFIG_DCA 135#ifdef CONFIG_IGB_DCA
135static int igb_notify_dca(struct notifier_block *, unsigned long, void *); 136static int igb_notify_dca(struct notifier_block *, unsigned long, void *);
136static struct notifier_block dca_notifier = { 137static struct notifier_block dca_notifier = {
137 .notifier_call = igb_notify_dca, 138 .notifier_call = igb_notify_dca,
@@ -207,7 +208,7 @@ static int __init igb_init_module(void)
207 global_quad_port_a = 0; 208 global_quad_port_a = 0;
208 209
209 ret = pci_register_driver(&igb_driver); 210 ret = pci_register_driver(&igb_driver);
210#ifdef CONFIG_DCA 211#ifdef CONFIG_IGB_DCA
211 dca_register_notify(&dca_notifier); 212 dca_register_notify(&dca_notifier);
212#endif 213#endif
213 return ret; 214 return ret;
@@ -223,7 +224,7 @@ module_init(igb_init_module);
223 **/ 224 **/
224static void __exit igb_exit_module(void) 225static void __exit igb_exit_module(void)
225{ 226{
226#ifdef CONFIG_DCA 227#ifdef CONFIG_IGB_DCA
227 dca_unregister_notify(&dca_notifier); 228 dca_unregister_notify(&dca_notifier);
228#endif 229#endif
229 pci_unregister_driver(&igb_driver); 230 pci_unregister_driver(&igb_driver);
@@ -966,10 +967,11 @@ static int __devinit igb_probe(struct pci_dev *pdev,
966 struct net_device *netdev; 967 struct net_device *netdev;
967 struct igb_adapter *adapter; 968 struct igb_adapter *adapter;
968 struct e1000_hw *hw; 969 struct e1000_hw *hw;
970 struct pci_dev *us_dev;
969 const struct e1000_info *ei = igb_info_tbl[ent->driver_data]; 971 const struct e1000_info *ei = igb_info_tbl[ent->driver_data];
970 unsigned long mmio_start, mmio_len; 972 unsigned long mmio_start, mmio_len;
971 int i, err, pci_using_dac; 973 int i, err, pci_using_dac, pos;
972 u16 eeprom_data = 0; 974 u16 eeprom_data = 0, state = 0;
973 u16 eeprom_apme_mask = IGB_EEPROM_APME; 975 u16 eeprom_apme_mask = IGB_EEPROM_APME;
974 u32 part_num; 976 u32 part_num;
975 int bars, need_ioport; 977 int bars, need_ioport;
@@ -1004,6 +1006,27 @@ static int __devinit igb_probe(struct pci_dev *pdev,
1004 } 1006 }
1005 } 1007 }
1006 1008
1009 /* 82575 requires that the pci-e link partner disable the L0s state */
1010 switch (pdev->device) {
1011 case E1000_DEV_ID_82575EB_COPPER:
1012 case E1000_DEV_ID_82575EB_FIBER_SERDES:
1013 case E1000_DEV_ID_82575GB_QUAD_COPPER:
1014 us_dev = pdev->bus->self;
1015 pos = pci_find_capability(us_dev, PCI_CAP_ID_EXP);
1016 if (pos) {
1017 pci_read_config_word(us_dev, pos + PCI_EXP_LNKCTL,
1018 &state);
1019 state &= ~PCIE_LINK_STATE_L0S;
1020 pci_write_config_word(us_dev, pos + PCI_EXP_LNKCTL,
1021 state);
1022 dev_info(&pdev->dev,
1023 "Disabling ASPM L0s upstream switch port %s\n",
1024 pci_name(us_dev));
1025 }
1026 default:
1027 break;
1028 }
1029
1007 err = pci_request_selected_regions(pdev, bars, igb_driver_name); 1030 err = pci_request_selected_regions(pdev, bars, igb_driver_name);
1008 if (err) 1031 if (err)
1009 goto err_pci_reg; 1032 goto err_pci_reg;
@@ -1220,6 +1243,7 @@ static int __devinit igb_probe(struct pci_dev *pdev,
1220 1243
1221 /* initialize the wol settings based on the eeprom settings */ 1244 /* initialize the wol settings based on the eeprom settings */
1222 adapter->wol = adapter->eeprom_wol; 1245 adapter->wol = adapter->eeprom_wol;
1246 device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
1223 1247
1224 /* reset the hardware with the new settings */ 1248 /* reset the hardware with the new settings */
1225 igb_reset(adapter); 1249 igb_reset(adapter);
@@ -1237,7 +1261,7 @@ static int __devinit igb_probe(struct pci_dev *pdev,
1237 if (err) 1261 if (err)
1238 goto err_register; 1262 goto err_register;
1239 1263
1240#ifdef CONFIG_DCA 1264#ifdef CONFIG_IGB_DCA
1241 if ((adapter->flags & IGB_FLAG_HAS_DCA) && 1265 if ((adapter->flags & IGB_FLAG_HAS_DCA) &&
1242 (dca_add_requester(&pdev->dev) == 0)) { 1266 (dca_add_requester(&pdev->dev) == 0)) {
1243 adapter->flags |= IGB_FLAG_DCA_ENABLED; 1267 adapter->flags |= IGB_FLAG_DCA_ENABLED;
@@ -1311,7 +1335,7 @@ static void __devexit igb_remove(struct pci_dev *pdev)
1311{ 1335{
1312 struct net_device *netdev = pci_get_drvdata(pdev); 1336 struct net_device *netdev = pci_get_drvdata(pdev);
1313 struct igb_adapter *adapter = netdev_priv(netdev); 1337 struct igb_adapter *adapter = netdev_priv(netdev);
1314#ifdef CONFIG_DCA 1338#ifdef CONFIG_IGB_DCA
1315 struct e1000_hw *hw = &adapter->hw; 1339 struct e1000_hw *hw = &adapter->hw;
1316#endif 1340#endif
1317 1341
@@ -1323,7 +1347,7 @@ static void __devexit igb_remove(struct pci_dev *pdev)
1323 1347
1324 flush_scheduled_work(); 1348 flush_scheduled_work();
1325 1349
1326#ifdef CONFIG_DCA 1350#ifdef CONFIG_IGB_DCA
1327 if (adapter->flags & IGB_FLAG_DCA_ENABLED) { 1351 if (adapter->flags & IGB_FLAG_DCA_ENABLED) {
1328 dev_info(&pdev->dev, "DCA disabled\n"); 1352 dev_info(&pdev->dev, "DCA disabled\n");
1329 dca_remove_requester(&pdev->dev); 1353 dca_remove_requester(&pdev->dev);
@@ -1956,7 +1980,6 @@ static void igb_configure_rx(struct igb_adapter *adapter)
1956 1980
1957/** 1981/**
1958 * igb_free_tx_resources - Free Tx Resources per Queue 1982 * igb_free_tx_resources - Free Tx Resources per Queue
1959 * @adapter: board private structure
1960 * @tx_ring: Tx descriptor ring for a specific queue 1983 * @tx_ring: Tx descriptor ring for a specific queue
1961 * 1984 *
1962 * Free all transmit software resources 1985 * Free all transmit software resources
@@ -2009,7 +2032,6 @@ static void igb_unmap_and_free_tx_resource(struct igb_adapter *adapter,
2009 2032
2010/** 2033/**
2011 * igb_clean_tx_ring - Free Tx Buffers 2034 * igb_clean_tx_ring - Free Tx Buffers
2012 * @adapter: board private structure
2013 * @tx_ring: ring to be cleaned 2035 * @tx_ring: ring to be cleaned
2014 **/ 2036 **/
2015static void igb_clean_tx_ring(struct igb_ring *tx_ring) 2037static void igb_clean_tx_ring(struct igb_ring *tx_ring)
@@ -2056,7 +2078,6 @@ static void igb_clean_all_tx_rings(struct igb_adapter *adapter)
2056 2078
2057/** 2079/**
2058 * igb_free_rx_resources - Free Rx Resources 2080 * igb_free_rx_resources - Free Rx Resources
2059 * @adapter: board private structure
2060 * @rx_ring: ring to clean the resources from 2081 * @rx_ring: ring to clean the resources from
2061 * 2082 *
2062 * Free all receive software resources 2083 * Free all receive software resources
@@ -2096,7 +2117,6 @@ static void igb_free_all_rx_resources(struct igb_adapter *adapter)
2096 2117
2097/** 2118/**
2098 * igb_clean_rx_ring - Free Rx Buffers per Queue 2119 * igb_clean_rx_ring - Free Rx Buffers per Queue
2099 * @adapter: board private structure
2100 * @rx_ring: ring to free buffers from 2120 * @rx_ring: ring to free buffers from
2101 **/ 2121 **/
2102static void igb_clean_rx_ring(struct igb_ring *rx_ring) 2122static void igb_clean_rx_ring(struct igb_ring *rx_ring)
@@ -3271,7 +3291,7 @@ static irqreturn_t igb_msix_tx(int irq, void *data)
3271 struct igb_adapter *adapter = tx_ring->adapter; 3291 struct igb_adapter *adapter = tx_ring->adapter;
3272 struct e1000_hw *hw = &adapter->hw; 3292 struct e1000_hw *hw = &adapter->hw;
3273 3293
3274#ifdef CONFIG_DCA 3294#ifdef CONFIG_IGB_DCA
3275 if (adapter->flags & IGB_FLAG_DCA_ENABLED) 3295 if (adapter->flags & IGB_FLAG_DCA_ENABLED)
3276 igb_update_tx_dca(tx_ring); 3296 igb_update_tx_dca(tx_ring);
3277#endif 3297#endif
@@ -3323,14 +3343,14 @@ static irqreturn_t igb_msix_rx(int irq, void *data)
3323 if (netif_rx_schedule_prep(adapter->netdev, &rx_ring->napi)) 3343 if (netif_rx_schedule_prep(adapter->netdev, &rx_ring->napi))
3324 __netif_rx_schedule(adapter->netdev, &rx_ring->napi); 3344 __netif_rx_schedule(adapter->netdev, &rx_ring->napi);
3325 3345
3326#ifdef CONFIG_DCA 3346#ifdef CONFIG_IGB_DCA
3327 if (adapter->flags & IGB_FLAG_DCA_ENABLED) 3347 if (adapter->flags & IGB_FLAG_DCA_ENABLED)
3328 igb_update_rx_dca(rx_ring); 3348 igb_update_rx_dca(rx_ring);
3329#endif 3349#endif
3330 return IRQ_HANDLED; 3350 return IRQ_HANDLED;
3331} 3351}
3332 3352
3333#ifdef CONFIG_DCA 3353#ifdef CONFIG_IGB_DCA
3334static void igb_update_rx_dca(struct igb_ring *rx_ring) 3354static void igb_update_rx_dca(struct igb_ring *rx_ring)
3335{ 3355{
3336 u32 dca_rxctrl; 3356 u32 dca_rxctrl;
@@ -3450,7 +3470,7 @@ static int igb_notify_dca(struct notifier_block *nb, unsigned long event,
3450 3470
3451 return ret_val ? NOTIFY_BAD : NOTIFY_DONE; 3471 return ret_val ? NOTIFY_BAD : NOTIFY_DONE;
3452} 3472}
3453#endif /* CONFIG_DCA */ 3473#endif /* CONFIG_IGB_DCA */
3454 3474
3455/** 3475/**
3456 * igb_intr_msi - Interrupt Handler 3476 * igb_intr_msi - Interrupt Handler
@@ -3529,13 +3549,13 @@ static int igb_poll(struct napi_struct *napi, int budget)
3529 int tx_clean_complete, work_done = 0; 3549 int tx_clean_complete, work_done = 0;
3530 3550
3531 /* this poll routine only supports one tx and one rx queue */ 3551 /* this poll routine only supports one tx and one rx queue */
3532#ifdef CONFIG_DCA 3552#ifdef CONFIG_IGB_DCA
3533 if (adapter->flags & IGB_FLAG_DCA_ENABLED) 3553 if (adapter->flags & IGB_FLAG_DCA_ENABLED)
3534 igb_update_tx_dca(&adapter->tx_ring[0]); 3554 igb_update_tx_dca(&adapter->tx_ring[0]);
3535#endif 3555#endif
3536 tx_clean_complete = igb_clean_tx_irq(&adapter->tx_ring[0]); 3556 tx_clean_complete = igb_clean_tx_irq(&adapter->tx_ring[0]);
3537 3557
3538#ifdef CONFIG_DCA 3558#ifdef CONFIG_IGB_DCA
3539 if (adapter->flags & IGB_FLAG_DCA_ENABLED) 3559 if (adapter->flags & IGB_FLAG_DCA_ENABLED)
3540 igb_update_rx_dca(&adapter->rx_ring[0]); 3560 igb_update_rx_dca(&adapter->rx_ring[0]);
3541#endif 3561#endif
@@ -3563,7 +3583,7 @@ static int igb_clean_rx_ring_msix(struct napi_struct *napi, int budget)
3563 struct net_device *netdev = adapter->netdev; 3583 struct net_device *netdev = adapter->netdev;
3564 int work_done = 0; 3584 int work_done = 0;
3565 3585
3566#ifdef CONFIG_DCA 3586#ifdef CONFIG_IGB_DCA
3567 if (adapter->flags & IGB_FLAG_DCA_ENABLED) 3587 if (adapter->flags & IGB_FLAG_DCA_ENABLED)
3568 igb_update_rx_dca(rx_ring); 3588 igb_update_rx_dca(rx_ring);
3569#endif 3589#endif
diff --git a/drivers/net/ipg.c b/drivers/net/ipg.c
index 7373dafbb3f7..059369885be1 100644
--- a/drivers/net/ipg.c
+++ b/drivers/net/ipg.c
@@ -1112,7 +1112,7 @@ static void ipg_nic_rx_free_skb(struct net_device *dev)
1112 struct ipg_rx *rxfd = sp->rxd + entry; 1112 struct ipg_rx *rxfd = sp->rxd + entry;
1113 1113
1114 pci_unmap_single(sp->pdev, 1114 pci_unmap_single(sp->pdev,
1115 le64_to_cpu(rxfd->frag_info & ~IPG_RFI_FRAGLEN), 1115 le64_to_cpu(rxfd->frag_info) & ~IPG_RFI_FRAGLEN,
1116 sp->rx_buf_sz, PCI_DMA_FROMDEVICE); 1116 sp->rx_buf_sz, PCI_DMA_FROMDEVICE);
1117 dev_kfree_skb_irq(sp->rx_buff[entry]); 1117 dev_kfree_skb_irq(sp->rx_buff[entry]);
1118 sp->rx_buff[entry] = NULL; 1118 sp->rx_buff[entry] = NULL;
@@ -1179,7 +1179,7 @@ static int ipg_nic_rx_check_error(struct net_device *dev)
1179 */ 1179 */
1180 if (sp->rx_buff[entry]) { 1180 if (sp->rx_buff[entry]) {
1181 pci_unmap_single(sp->pdev, 1181 pci_unmap_single(sp->pdev,
1182 le64_to_cpu(rxfd->frag_info & ~IPG_RFI_FRAGLEN), 1182 le64_to_cpu(rxfd->frag_info) & ~IPG_RFI_FRAGLEN,
1183 sp->rx_buf_sz, PCI_DMA_FROMDEVICE); 1183 sp->rx_buf_sz, PCI_DMA_FROMDEVICE);
1184 1184
1185 dev_kfree_skb_irq(sp->rx_buff[entry]); 1185 dev_kfree_skb_irq(sp->rx_buff[entry]);
@@ -1246,7 +1246,7 @@ static void ipg_nic_rx_with_start(struct net_device *dev,
1246 if (jumbo->found_start) 1246 if (jumbo->found_start)
1247 dev_kfree_skb_irq(jumbo->skb); 1247 dev_kfree_skb_irq(jumbo->skb);
1248 1248
1249 pci_unmap_single(pdev, le64_to_cpu(rxfd->frag_info & ~IPG_RFI_FRAGLEN), 1249 pci_unmap_single(pdev, le64_to_cpu(rxfd->frag_info) & ~IPG_RFI_FRAGLEN,
1250 sp->rx_buf_sz, PCI_DMA_FROMDEVICE); 1250 sp->rx_buf_sz, PCI_DMA_FROMDEVICE);
1251 1251
1252 skb_put(skb, sp->rxfrag_size); 1252 skb_put(skb, sp->rxfrag_size);
@@ -1349,7 +1349,7 @@ static int ipg_nic_rx_jumbo(struct net_device *dev)
1349 unsigned int entry = curr % IPG_RFDLIST_LENGTH; 1349 unsigned int entry = curr % IPG_RFDLIST_LENGTH;
1350 struct ipg_rx *rxfd = sp->rxd + entry; 1350 struct ipg_rx *rxfd = sp->rxd + entry;
1351 1351
1352 if (!(rxfd->rfs & le64_to_cpu(IPG_RFS_RFDDONE))) 1352 if (!(rxfd->rfs & cpu_to_le64(IPG_RFS_RFDDONE)))
1353 break; 1353 break;
1354 1354
1355 switch (ipg_nic_rx_check_frame_type(dev)) { 1355 switch (ipg_nic_rx_check_frame_type(dev)) {
diff --git a/drivers/net/irda/ks959-sir.c b/drivers/net/irda/ks959-sir.c
index 2482d61662a2..2e67ae015d91 100644
--- a/drivers/net/irda/ks959-sir.c
+++ b/drivers/net/irda/ks959-sir.c
@@ -118,7 +118,6 @@
118#include <linux/errno.h> 118#include <linux/errno.h>
119#include <linux/init.h> 119#include <linux/init.h>
120#include <linux/slab.h> 120#include <linux/slab.h>
121#include <linux/module.h>
122#include <linux/kref.h> 121#include <linux/kref.h>
123#include <linux/usb.h> 122#include <linux/usb.h>
124#include <linux/device.h> 123#include <linux/device.h>
diff --git a/drivers/net/irda/ksdazzle-sir.c b/drivers/net/irda/ksdazzle-sir.c
index 1e0de93fd618..3843b5faba8b 100644
--- a/drivers/net/irda/ksdazzle-sir.c
+++ b/drivers/net/irda/ksdazzle-sir.c
@@ -82,7 +82,6 @@
82#include <linux/errno.h> 82#include <linux/errno.h>
83#include <linux/init.h> 83#include <linux/init.h>
84#include <linux/slab.h> 84#include <linux/slab.h>
85#include <linux/module.h>
86#include <linux/kref.h> 85#include <linux/kref.h>
87#include <linux/usb.h> 86#include <linux/usb.h>
88#include <linux/device.h> 87#include <linux/device.h>
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index 7548fb7360d9..5236f633ee36 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -1287,13 +1287,39 @@ static void ixgbe_set_itr(struct ixgbe_adapter *adapter)
1287 return; 1287 return;
1288} 1288}
1289 1289
1290static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter); 1290/**
1291 * ixgbe_irq_disable - Mask off interrupt generation on the NIC
1292 * @adapter: board private structure
1293 **/
1294static inline void ixgbe_irq_disable(struct ixgbe_adapter *adapter)
1295{
1296 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0);
1297 IXGBE_WRITE_FLUSH(&adapter->hw);
1298 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
1299 int i;
1300 for (i = 0; i < adapter->num_msix_vectors; i++)
1301 synchronize_irq(adapter->msix_entries[i].vector);
1302 } else {
1303 synchronize_irq(adapter->pdev->irq);
1304 }
1305}
1306
1307/**
1308 * ixgbe_irq_enable - Enable default interrupt generation settings
1309 * @adapter: board private structure
1310 **/
1311static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter)
1312{
1313 u32 mask;
1314 mask = IXGBE_EIMS_ENABLE_MASK;
1315 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask);
1316 IXGBE_WRITE_FLUSH(&adapter->hw);
1317}
1291 1318
1292/** 1319/**
1293 * ixgbe_intr - legacy mode Interrupt Handler 1320 * ixgbe_intr - legacy mode Interrupt Handler
1294 * @irq: interrupt number 1321 * @irq: interrupt number
1295 * @data: pointer to a network interface device structure 1322 * @data: pointer to a network interface device structure
1296 * @pt_regs: CPU registers structure
1297 **/ 1323 **/
1298static irqreturn_t ixgbe_intr(int irq, void *data) 1324static irqreturn_t ixgbe_intr(int irq, void *data)
1299{ 1325{
@@ -1394,35 +1420,6 @@ static void ixgbe_free_irq(struct ixgbe_adapter *adapter)
1394} 1420}
1395 1421
1396/** 1422/**
1397 * ixgbe_irq_disable - Mask off interrupt generation on the NIC
1398 * @adapter: board private structure
1399 **/
1400static inline void ixgbe_irq_disable(struct ixgbe_adapter *adapter)
1401{
1402 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0);
1403 IXGBE_WRITE_FLUSH(&adapter->hw);
1404 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
1405 int i;
1406 for (i = 0; i < adapter->num_msix_vectors; i++)
1407 synchronize_irq(adapter->msix_entries[i].vector);
1408 } else {
1409 synchronize_irq(adapter->pdev->irq);
1410 }
1411}
1412
1413/**
1414 * ixgbe_irq_enable - Enable default interrupt generation settings
1415 * @adapter: board private structure
1416 **/
1417static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter)
1418{
1419 u32 mask;
1420 mask = IXGBE_EIMS_ENABLE_MASK;
1421 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask);
1422 IXGBE_WRITE_FLUSH(&adapter->hw);
1423}
1424
1425/**
1426 * ixgbe_configure_msi_and_legacy - Initialize PIN (INTA...) and MSI interrupts 1423 * ixgbe_configure_msi_and_legacy - Initialize PIN (INTA...) and MSI interrupts
1427 * 1424 *
1428 **/ 1425 **/
@@ -2332,7 +2329,7 @@ static void ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter,
2332 * Once we know the feature-set enabled for the device, we'll cache 2329 * Once we know the feature-set enabled for the device, we'll cache
2333 * the register offset the descriptor ring is assigned to. 2330 * the register offset the descriptor ring is assigned to.
2334 **/ 2331 **/
2335static void __devinit ixgbe_cache_ring_register(struct ixgbe_adapter *adapter) 2332static void ixgbe_cache_ring_register(struct ixgbe_adapter *adapter)
2336{ 2333{
2337 int feature_mask = 0, rss_i; 2334 int feature_mask = 0, rss_i;
2338 int i, txr_idx, rxr_idx; 2335 int i, txr_idx, rxr_idx;
@@ -2369,7 +2366,7 @@ static void __devinit ixgbe_cache_ring_register(struct ixgbe_adapter *adapter)
2369 * number of queues at compile-time. The polling_netdev array is 2366 * number of queues at compile-time. The polling_netdev array is
2370 * intended for Multiqueue, but should work fine with a single queue. 2367 * intended for Multiqueue, but should work fine with a single queue.
2371 **/ 2368 **/
2372static int __devinit ixgbe_alloc_queues(struct ixgbe_adapter *adapter) 2369static int ixgbe_alloc_queues(struct ixgbe_adapter *adapter)
2373{ 2370{
2374 int i; 2371 int i;
2375 2372
@@ -2410,8 +2407,7 @@ err_tx_ring_allocation:
2410 * Attempt to configure the interrupts using the best available 2407 * Attempt to configure the interrupts using the best available
2411 * capabilities of the hardware and the kernel. 2408 * capabilities of the hardware and the kernel.
2412 **/ 2409 **/
2413static int __devinit ixgbe_set_interrupt_capability(struct ixgbe_adapter 2410static int ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter)
2414 *adapter)
2415{ 2411{
2416 int err = 0; 2412 int err = 0;
2417 int vector, v_budget; 2413 int vector, v_budget;
@@ -2503,7 +2499,7 @@ static void ixgbe_reset_interrupt_capability(struct ixgbe_adapter *adapter)
2503 * - Hardware queue count (num_*_queues) 2499 * - Hardware queue count (num_*_queues)
2504 * - defined by miscellaneous hardware support/features (RSS, etc.) 2500 * - defined by miscellaneous hardware support/features (RSS, etc.)
2505 **/ 2501 **/
2506static int __devinit ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter) 2502static int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter)
2507{ 2503{
2508 int err; 2504 int err;
2509 2505
diff --git a/drivers/net/jme.c b/drivers/net/jme.c
index 81c6cdc3851f..665e70d620fc 100644
--- a/drivers/net/jme.c
+++ b/drivers/net/jme.c
@@ -912,23 +912,23 @@ jme_alloc_and_feed_skb(struct jme_adapter *jme, int idx)
912 skb_put(skb, framesize); 912 skb_put(skb, framesize);
913 skb->protocol = eth_type_trans(skb, jme->dev); 913 skb->protocol = eth_type_trans(skb, jme->dev);
914 914
915 if (jme_rxsum_ok(jme, rxdesc->descwb.flags)) 915 if (jme_rxsum_ok(jme, le16_to_cpu(rxdesc->descwb.flags)))
916 skb->ip_summed = CHECKSUM_UNNECESSARY; 916 skb->ip_summed = CHECKSUM_UNNECESSARY;
917 else 917 else
918 skb->ip_summed = CHECKSUM_NONE; 918 skb->ip_summed = CHECKSUM_NONE;
919 919
920 if (rxdesc->descwb.flags & RXWBFLAG_TAGON) { 920 if (rxdesc->descwb.flags & cpu_to_le16(RXWBFLAG_TAGON)) {
921 if (jme->vlgrp) { 921 if (jme->vlgrp) {
922 jme->jme_vlan_rx(skb, jme->vlgrp, 922 jme->jme_vlan_rx(skb, jme->vlgrp,
923 le32_to_cpu(rxdesc->descwb.vlan)); 923 le16_to_cpu(rxdesc->descwb.vlan));
924 NET_STAT(jme).rx_bytes += 4; 924 NET_STAT(jme).rx_bytes += 4;
925 } 925 }
926 } else { 926 } else {
927 jme->jme_rx(skb); 927 jme->jme_rx(skb);
928 } 928 }
929 929
930 if ((le16_to_cpu(rxdesc->descwb.flags) & RXWBFLAG_DEST) == 930 if ((rxdesc->descwb.flags & cpu_to_le16(RXWBFLAG_DEST)) ==
931 RXWBFLAG_DEST_MUL) 931 cpu_to_le16(RXWBFLAG_DEST_MUL))
932 ++(NET_STAT(jme).multicast); 932 ++(NET_STAT(jme).multicast);
933 933
934 jme->dev->last_rx = jiffies; 934 jme->dev->last_rx = jiffies;
@@ -961,7 +961,7 @@ jme_process_receive(struct jme_adapter *jme, int limit)
961 rxdesc = rxring->desc; 961 rxdesc = rxring->desc;
962 rxdesc += i; 962 rxdesc += i;
963 963
964 if ((rxdesc->descwb.flags & RXWBFLAG_OWN) || 964 if ((rxdesc->descwb.flags & cpu_to_le16(RXWBFLAG_OWN)) ||
965 !(rxdesc->descwb.desccnt & RXWBDCNT_WBCPL)) 965 !(rxdesc->descwb.desccnt & RXWBDCNT_WBCPL))
966 goto out; 966 goto out;
967 967
@@ -1763,10 +1763,9 @@ jme_expand_header(struct jme_adapter *jme, struct sk_buff *skb)
1763} 1763}
1764 1764
1765static int 1765static int
1766jme_tx_tso(struct sk_buff *skb, 1766jme_tx_tso(struct sk_buff *skb, __le16 *mss, u8 *flags)
1767 u16 *mss, u8 *flags)
1768{ 1767{
1769 *mss = skb_shinfo(skb)->gso_size << TXDESC_MSS_SHIFT; 1768 *mss = cpu_to_le16(skb_shinfo(skb)->gso_size << TXDESC_MSS_SHIFT);
1770 if (*mss) { 1769 if (*mss) {
1771 *flags |= TXFLAG_LSEN; 1770 *flags |= TXFLAG_LSEN;
1772 1771
@@ -1826,11 +1825,11 @@ jme_tx_csum(struct jme_adapter *jme, struct sk_buff *skb, u8 *flags)
1826} 1825}
1827 1826
1828static inline void 1827static inline void
1829jme_tx_vlan(struct sk_buff *skb, u16 *vlan, u8 *flags) 1828jme_tx_vlan(struct sk_buff *skb, __le16 *vlan, u8 *flags)
1830{ 1829{
1831 if (vlan_tx_tag_present(skb)) { 1830 if (vlan_tx_tag_present(skb)) {
1832 *flags |= TXFLAG_TAGON; 1831 *flags |= TXFLAG_TAGON;
1833 *vlan = vlan_tx_tag_get(skb); 1832 *vlan = cpu_to_le16(vlan_tx_tag_get(skb));
1834 } 1833 }
1835} 1834}
1836 1835
diff --git a/drivers/net/jme.h b/drivers/net/jme.h
index f863aee6648b..3f5d91543246 100644
--- a/drivers/net/jme.h
+++ b/drivers/net/jme.h
@@ -22,7 +22,7 @@
22 */ 22 */
23 23
24#ifndef __JME_H_INCLUDED__ 24#ifndef __JME_H_INCLUDED__
25#define __JME_H_INCLUDEE__ 25#define __JME_H_INCLUDED__
26 26
27#define DRV_NAME "jme" 27#define DRV_NAME "jme"
28#define DRV_VERSION "1.0.3" 28#define DRV_VERSION "1.0.3"
diff --git a/drivers/net/loopback.c b/drivers/net/loopback.c
index 3b43bfd85a0f..b1ac63ab8c16 100644
--- a/drivers/net/loopback.c
+++ b/drivers/net/loopback.c
@@ -76,15 +76,6 @@ static int loopback_xmit(struct sk_buff *skb, struct net_device *dev)
76 76
77 skb->protocol = eth_type_trans(skb,dev); 77 skb->protocol = eth_type_trans(skb,dev);
78 78
79#ifdef LOOPBACK_TSO
80 if (skb_is_gso(skb)) {
81 BUG_ON(skb->protocol != htons(ETH_P_IP));
82 BUG_ON(ip_hdr(skb)->protocol != IPPROTO_TCP);
83
84 emulate_large_send_offload(skb);
85 return 0;
86 }
87#endif
88 dev->last_rx = jiffies; 79 dev->last_rx = jiffies;
89 80
90 /* it's OK to use per_cpu_ptr() because BHs are off */ 81 /* it's OK to use per_cpu_ptr() because BHs are off */
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index 42394505bb50..590039cbb146 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -70,6 +70,9 @@ static void macvlan_broadcast(struct sk_buff *skb,
70 struct sk_buff *nskb; 70 struct sk_buff *nskb;
71 unsigned int i; 71 unsigned int i;
72 72
73 if (skb->protocol == htons(ETH_P_PAUSE))
74 return;
75
73 for (i = 0; i < MACVLAN_HASH_SIZE; i++) { 76 for (i = 0; i < MACVLAN_HASH_SIZE; i++) {
74 hlist_for_each_entry_rcu(vlan, n, &port->vlan_hash[i], hlist) { 77 hlist_for_each_entry_rcu(vlan, n, &port->vlan_hash[i], hlist) {
75 dev = vlan->dev; 78 dev = vlan->dev;
diff --git a/drivers/net/mlx4/Makefile b/drivers/net/mlx4/Makefile
index 0952a6528f58..a7a97bf998f8 100644
--- a/drivers/net/mlx4/Makefile
+++ b/drivers/net/mlx4/Makefile
@@ -1,4 +1,9 @@
1obj-$(CONFIG_MLX4_CORE) += mlx4_core.o 1obj-$(CONFIG_MLX4_CORE) += mlx4_core.o
2 2
3mlx4_core-y := alloc.o catas.o cmd.o cq.o eq.o fw.o icm.o intf.o main.o mcg.o \ 3mlx4_core-y := alloc.o catas.o cmd.o cq.o eq.o fw.o icm.o intf.o main.o mcg.o \
4 mr.o pd.o profile.o qp.o reset.o srq.o 4 mr.o pd.o port.o profile.o qp.o reset.o srq.o
5
6obj-$(CONFIG_MLX4_EN) += mlx4_en.o
7
8mlx4_en-y := en_main.o en_tx.o en_rx.o en_params.o en_port.o en_cq.o \
9 en_resources.o en_netdev.o
diff --git a/drivers/net/mlx4/alloc.c b/drivers/net/mlx4/alloc.c
index b411b79d72ad..ad95d5f7b630 100644
--- a/drivers/net/mlx4/alloc.c
+++ b/drivers/net/mlx4/alloc.c
@@ -48,13 +48,16 @@ u32 mlx4_bitmap_alloc(struct mlx4_bitmap *bitmap)
48 48
49 obj = find_next_zero_bit(bitmap->table, bitmap->max, bitmap->last); 49 obj = find_next_zero_bit(bitmap->table, bitmap->max, bitmap->last);
50 if (obj >= bitmap->max) { 50 if (obj >= bitmap->max) {
51 bitmap->top = (bitmap->top + bitmap->max) & bitmap->mask; 51 bitmap->top = (bitmap->top + bitmap->max + bitmap->reserved_top)
52 & bitmap->mask;
52 obj = find_first_zero_bit(bitmap->table, bitmap->max); 53 obj = find_first_zero_bit(bitmap->table, bitmap->max);
53 } 54 }
54 55
55 if (obj < bitmap->max) { 56 if (obj < bitmap->max) {
56 set_bit(obj, bitmap->table); 57 set_bit(obj, bitmap->table);
57 bitmap->last = (obj + 1) & (bitmap->max - 1); 58 bitmap->last = (obj + 1);
59 if (bitmap->last == bitmap->max)
60 bitmap->last = 0;
58 obj |= bitmap->top; 61 obj |= bitmap->top;
59 } else 62 } else
60 obj = -1; 63 obj = -1;
@@ -66,16 +69,90 @@ u32 mlx4_bitmap_alloc(struct mlx4_bitmap *bitmap)
66 69
67void mlx4_bitmap_free(struct mlx4_bitmap *bitmap, u32 obj) 70void mlx4_bitmap_free(struct mlx4_bitmap *bitmap, u32 obj)
68{ 71{
69 obj &= bitmap->max - 1; 72 mlx4_bitmap_free_range(bitmap, obj, 1);
73}
74
75static unsigned long find_aligned_range(unsigned long *bitmap,
76 u32 start, u32 nbits,
77 int len, int align)
78{
79 unsigned long end, i;
80
81again:
82 start = ALIGN(start, align);
83
84 while ((start < nbits) && test_bit(start, bitmap))
85 start += align;
86
87 if (start >= nbits)
88 return -1;
89
90 end = start+len;
91 if (end > nbits)
92 return -1;
93
94 for (i = start + 1; i < end; i++) {
95 if (test_bit(i, bitmap)) {
96 start = i + 1;
97 goto again;
98 }
99 }
100
101 return start;
102}
103
104u32 mlx4_bitmap_alloc_range(struct mlx4_bitmap *bitmap, int cnt, int align)
105{
106 u32 obj, i;
107
108 if (likely(cnt == 1 && align == 1))
109 return mlx4_bitmap_alloc(bitmap);
110
111 spin_lock(&bitmap->lock);
112
113 obj = find_aligned_range(bitmap->table, bitmap->last,
114 bitmap->max, cnt, align);
115 if (obj >= bitmap->max) {
116 bitmap->top = (bitmap->top + bitmap->max + bitmap->reserved_top)
117 & bitmap->mask;
118 obj = find_aligned_range(bitmap->table, 0, bitmap->max,
119 cnt, align);
120 }
121
122 if (obj < bitmap->max) {
123 for (i = 0; i < cnt; i++)
124 set_bit(obj + i, bitmap->table);
125 if (obj == bitmap->last) {
126 bitmap->last = (obj + cnt);
127 if (bitmap->last >= bitmap->max)
128 bitmap->last = 0;
129 }
130 obj |= bitmap->top;
131 } else
132 obj = -1;
133
134 spin_unlock(&bitmap->lock);
135
136 return obj;
137}
138
139void mlx4_bitmap_free_range(struct mlx4_bitmap *bitmap, u32 obj, int cnt)
140{
141 u32 i;
142
143 obj &= bitmap->max + bitmap->reserved_top - 1;
70 144
71 spin_lock(&bitmap->lock); 145 spin_lock(&bitmap->lock);
72 clear_bit(obj, bitmap->table); 146 for (i = 0; i < cnt; i++)
147 clear_bit(obj + i, bitmap->table);
73 bitmap->last = min(bitmap->last, obj); 148 bitmap->last = min(bitmap->last, obj);
74 bitmap->top = (bitmap->top + bitmap->max) & bitmap->mask; 149 bitmap->top = (bitmap->top + bitmap->max + bitmap->reserved_top)
150 & bitmap->mask;
75 spin_unlock(&bitmap->lock); 151 spin_unlock(&bitmap->lock);
76} 152}
77 153
78int mlx4_bitmap_init(struct mlx4_bitmap *bitmap, u32 num, u32 mask, u32 reserved) 154int mlx4_bitmap_init(struct mlx4_bitmap *bitmap, u32 num, u32 mask,
155 u32 reserved_bot, u32 reserved_top)
79{ 156{
80 int i; 157 int i;
81 158
@@ -85,14 +162,16 @@ int mlx4_bitmap_init(struct mlx4_bitmap *bitmap, u32 num, u32 mask, u32 reserved
85 162
86 bitmap->last = 0; 163 bitmap->last = 0;
87 bitmap->top = 0; 164 bitmap->top = 0;
88 bitmap->max = num; 165 bitmap->max = num - reserved_top;
89 bitmap->mask = mask; 166 bitmap->mask = mask;
167 bitmap->reserved_top = reserved_top;
90 spin_lock_init(&bitmap->lock); 168 spin_lock_init(&bitmap->lock);
91 bitmap->table = kzalloc(BITS_TO_LONGS(num) * sizeof (long), GFP_KERNEL); 169 bitmap->table = kzalloc(BITS_TO_LONGS(bitmap->max) *
170 sizeof (long), GFP_KERNEL);
92 if (!bitmap->table) 171 if (!bitmap->table)
93 return -ENOMEM; 172 return -ENOMEM;
94 173
95 for (i = 0; i < reserved; ++i) 174 for (i = 0; i < reserved_bot; ++i)
96 set_bit(i, bitmap->table); 175 set_bit(i, bitmap->table);
97 176
98 return 0; 177 return 0;
diff --git a/drivers/net/mlx4/cq.c b/drivers/net/mlx4/cq.c
index 9bb50e3f8974..b7ad2829d67e 100644
--- a/drivers/net/mlx4/cq.c
+++ b/drivers/net/mlx4/cq.c
@@ -300,7 +300,7 @@ int mlx4_init_cq_table(struct mlx4_dev *dev)
300 INIT_RADIX_TREE(&cq_table->tree, GFP_ATOMIC); 300 INIT_RADIX_TREE(&cq_table->tree, GFP_ATOMIC);
301 301
302 err = mlx4_bitmap_init(&cq_table->bitmap, dev->caps.num_cqs, 302 err = mlx4_bitmap_init(&cq_table->bitmap, dev->caps.num_cqs,
303 dev->caps.num_cqs - 1, dev->caps.reserved_cqs); 303 dev->caps.num_cqs - 1, dev->caps.reserved_cqs, 0);
304 if (err) 304 if (err)
305 return err; 305 return err;
306 306
diff --git a/drivers/net/mlx4/en_cq.c b/drivers/net/mlx4/en_cq.c
new file mode 100644
index 000000000000..1368a8010af4
--- /dev/null
+++ b/drivers/net/mlx4/en_cq.c
@@ -0,0 +1,146 @@
1/*
2 * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 *
32 */
33
34#include <linux/mlx4/cq.h>
35#include <linux/mlx4/qp.h>
36#include <linux/mlx4/cmd.h>
37
38#include "mlx4_en.h"
39
40static void mlx4_en_cq_event(struct mlx4_cq *cq, enum mlx4_event event)
41{
42 return;
43}
44
45
46int mlx4_en_create_cq(struct mlx4_en_priv *priv,
47 struct mlx4_en_cq *cq,
48 int entries, int ring, enum cq_type mode)
49{
50 struct mlx4_en_dev *mdev = priv->mdev;
51 int err;
52
53 cq->size = entries;
54 if (mode == RX)
55 cq->buf_size = cq->size * sizeof(struct mlx4_cqe);
56 else
57 cq->buf_size = sizeof(struct mlx4_cqe);
58
59 cq->ring = ring;
60 cq->is_tx = mode;
61 spin_lock_init(&cq->lock);
62
63 err = mlx4_alloc_hwq_res(mdev->dev, &cq->wqres,
64 cq->buf_size, 2 * PAGE_SIZE);
65 if (err)
66 return err;
67
68 err = mlx4_en_map_buffer(&cq->wqres.buf);
69 if (err)
70 mlx4_free_hwq_res(mdev->dev, &cq->wqres, cq->buf_size);
71
72 return err;
73}
74
75int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
76{
77 struct mlx4_en_dev *mdev = priv->mdev;
78 int err;
79
80 cq->dev = mdev->pndev[priv->port];
81 cq->mcq.set_ci_db = cq->wqres.db.db;
82 cq->mcq.arm_db = cq->wqres.db.db + 1;
83 *cq->mcq.set_ci_db = 0;
84 *cq->mcq.arm_db = 0;
85 cq->buf = (struct mlx4_cqe *) cq->wqres.buf.direct.buf;
86 memset(cq->buf, 0, cq->buf_size);
87
88 err = mlx4_cq_alloc(mdev->dev, cq->size, &cq->wqres.mtt, &mdev->priv_uar,
89 cq->wqres.db.dma, &cq->mcq, cq->is_tx);
90 if (err)
91 return err;
92
93 cq->mcq.comp = cq->is_tx ? mlx4_en_tx_irq : mlx4_en_rx_irq;
94 cq->mcq.event = mlx4_en_cq_event;
95
96 if (cq->is_tx) {
97 init_timer(&cq->timer);
98 cq->timer.function = mlx4_en_poll_tx_cq;
99 cq->timer.data = (unsigned long) cq;
100 } else {
101 netif_napi_add(cq->dev, &cq->napi, mlx4_en_poll_rx_cq, 64);
102 napi_enable(&cq->napi);
103 }
104
105 return 0;
106}
107
108void mlx4_en_destroy_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
109{
110 struct mlx4_en_dev *mdev = priv->mdev;
111
112 mlx4_en_unmap_buffer(&cq->wqres.buf);
113 mlx4_free_hwq_res(mdev->dev, &cq->wqres, cq->buf_size);
114 cq->buf_size = 0;
115 cq->buf = NULL;
116}
117
118void mlx4_en_deactivate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
119{
120 struct mlx4_en_dev *mdev = priv->mdev;
121
122 if (cq->is_tx)
123 del_timer(&cq->timer);
124 else
125 napi_disable(&cq->napi);
126
127 mlx4_cq_free(mdev->dev, &cq->mcq);
128}
129
130/* Set rx cq moderation parameters */
131int mlx4_en_set_cq_moder(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
132{
133 return mlx4_cq_modify(priv->mdev->dev, &cq->mcq,
134 cq->moder_cnt, cq->moder_time);
135}
136
137int mlx4_en_arm_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
138{
139 cq->armed = 1;
140 mlx4_cq_arm(&cq->mcq, MLX4_CQ_DB_REQ_NOT, priv->mdev->uar_map,
141 &priv->mdev->uar_lock);
142
143 return 0;
144}
145
146
diff --git a/drivers/net/mlx4/en_main.c b/drivers/net/mlx4/en_main.c
new file mode 100644
index 000000000000..4b9794e97a79
--- /dev/null
+++ b/drivers/net/mlx4/en_main.c
@@ -0,0 +1,253 @@
1/*
2 * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 *
32 */
33
34#include <linux/cpumask.h>
35#include <linux/module.h>
36#include <linux/delay.h>
37#include <linux/netdevice.h>
38
39#include <linux/mlx4/driver.h>
40#include <linux/mlx4/device.h>
41#include <linux/mlx4/cmd.h>
42
43#include "mlx4_en.h"
44
45MODULE_AUTHOR("Liran Liss, Yevgeny Petrilin");
46MODULE_DESCRIPTION("Mellanox ConnectX HCA Ethernet driver");
47MODULE_LICENSE("Dual BSD/GPL");
48MODULE_VERSION(DRV_VERSION " ("DRV_RELDATE")");
49
50static const char mlx4_en_version[] =
51 DRV_NAME ": Mellanox ConnectX HCA Ethernet driver v"
52 DRV_VERSION " (" DRV_RELDATE ")\n";
53
54static void mlx4_en_event(struct mlx4_dev *dev, void *endev_ptr,
55 enum mlx4_dev_event event, int port)
56{
57 struct mlx4_en_dev *mdev = (struct mlx4_en_dev *) endev_ptr;
58 struct mlx4_en_priv *priv;
59
60 if (!mdev->pndev[port])
61 return;
62
63 priv = netdev_priv(mdev->pndev[port]);
64 switch (event) {
65 case MLX4_DEV_EVENT_PORT_UP:
66 case MLX4_DEV_EVENT_PORT_DOWN:
67 /* To prevent races, we poll the link state in a separate
68 task rather than changing it here */
69 priv->link_state = event;
70 queue_work(mdev->workqueue, &priv->linkstate_task);
71 break;
72
73 case MLX4_DEV_EVENT_CATASTROPHIC_ERROR:
74 mlx4_err(mdev, "Internal error detected, restarting device\n");
75 break;
76
77 default:
78 mlx4_warn(mdev, "Unhandled event: %d\n", event);
79 }
80}
81
82static void mlx4_en_remove(struct mlx4_dev *dev, void *endev_ptr)
83{
84 struct mlx4_en_dev *mdev = endev_ptr;
85 int i;
86
87 mutex_lock(&mdev->state_lock);
88 mdev->device_up = false;
89 mutex_unlock(&mdev->state_lock);
90
91 mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH)
92 if (mdev->pndev[i])
93 mlx4_en_destroy_netdev(mdev->pndev[i]);
94
95 flush_workqueue(mdev->workqueue);
96 destroy_workqueue(mdev->workqueue);
97 mlx4_mr_free(dev, &mdev->mr);
98 mlx4_uar_free(dev, &mdev->priv_uar);
99 mlx4_pd_free(dev, mdev->priv_pdn);
100 kfree(mdev);
101}
102
103static void *mlx4_en_add(struct mlx4_dev *dev)
104{
105 static int mlx4_en_version_printed;
106 struct mlx4_en_dev *mdev;
107 int i;
108 int err;
109
110 if (!mlx4_en_version_printed) {
111 printk(KERN_INFO "%s", mlx4_en_version);
112 mlx4_en_version_printed++;
113 }
114
115 mdev = kzalloc(sizeof *mdev, GFP_KERNEL);
116 if (!mdev) {
117 dev_err(&dev->pdev->dev, "Device struct alloc failed, "
118 "aborting.\n");
119 err = -ENOMEM;
120 goto err_free_res;
121 }
122
123 if (mlx4_pd_alloc(dev, &mdev->priv_pdn))
124 goto err_free_dev;
125
126 if (mlx4_uar_alloc(dev, &mdev->priv_uar))
127 goto err_pd;
128
129 mdev->uar_map = ioremap(mdev->priv_uar.pfn << PAGE_SHIFT, PAGE_SIZE);
130 if (!mdev->uar_map)
131 goto err_uar;
132 spin_lock_init(&mdev->uar_lock);
133
134 mdev->dev = dev;
135 mdev->dma_device = &(dev->pdev->dev);
136 mdev->pdev = dev->pdev;
137 mdev->device_up = false;
138
139 mdev->LSO_support = !!(dev->caps.flags & (1 << 15));
140 if (!mdev->LSO_support)
141 mlx4_warn(mdev, "LSO not supported, please upgrade to later "
142 "FW version to enable LSO\n");
143
144 if (mlx4_mr_alloc(mdev->dev, mdev->priv_pdn, 0, ~0ull,
145 MLX4_PERM_LOCAL_WRITE | MLX4_PERM_LOCAL_READ,
146 0, 0, &mdev->mr)) {
147 mlx4_err(mdev, "Failed allocating memory region\n");
148 goto err_uar;
149 }
150 if (mlx4_mr_enable(mdev->dev, &mdev->mr)) {
151 mlx4_err(mdev, "Failed enabling memory region\n");
152 goto err_mr;
153 }
154
155 /* Build device profile according to supplied module parameters */
156 err = mlx4_en_get_profile(mdev);
157 if (err) {
158 mlx4_err(mdev, "Bad module parameters, aborting.\n");
159 goto err_mr;
160 }
161
 162 /* Configure which ports to start according to module parameters */
163 mdev->port_cnt = 0;
164 mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH)
165 mdev->port_cnt++;
166
167 /* If we did not receive an explicit number of Rx rings, default to
168 * the number of completion vectors populated by the mlx4_core */
169 mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH) {
170 mlx4_info(mdev, "Using %d tx rings for port:%d\n",
171 mdev->profile.prof[i].tx_ring_num, i);
172 if (!mdev->profile.prof[i].rx_ring_num) {
173 mdev->profile.prof[i].rx_ring_num = 1;
174 mlx4_info(mdev, "Defaulting to %d rx rings for port:%d\n",
175 1, i);
176 } else
177 mlx4_info(mdev, "Using %d rx rings for port:%d\n",
178 mdev->profile.prof[i].rx_ring_num, i);
179 }
180
181 /* Create our own workqueue for reset/multicast tasks
182 * Note: we cannot use the shared workqueue because of deadlocks caused
183 * by the rtnl lock */
184 mdev->workqueue = create_singlethread_workqueue("mlx4_en");
185 if (!mdev->workqueue) {
186 err = -ENOMEM;
187 goto err_close_nic;
188 }
189
190 /* At this stage all non-port specific tasks are complete:
191 * mark the card state as up */
192 mutex_init(&mdev->state_lock);
193 mdev->device_up = true;
194
195 /* Setup ports */
196
197 /* Create a netdev for each port */
198 mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH) {
199 mlx4_info(mdev, "Activating port:%d\n", i);
200 if (mlx4_en_init_netdev(mdev, i, &mdev->profile.prof[i])) {
201 mdev->pndev[i] = NULL;
202 goto err_free_netdev;
203 }
204 }
205 return mdev;
206
207
208err_free_netdev:
209 mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH) {
210 if (mdev->pndev[i])
211 mlx4_en_destroy_netdev(mdev->pndev[i]);
212 }
213
214 mutex_lock(&mdev->state_lock);
215 mdev->device_up = false;
216 mutex_unlock(&mdev->state_lock);
217 flush_workqueue(mdev->workqueue);
218
219 /* Stop event queue before we drop down to release shared SW state */
220
221err_close_nic:
222 destroy_workqueue(mdev->workqueue);
223err_mr:
224 mlx4_mr_free(dev, &mdev->mr);
225err_uar:
226 mlx4_uar_free(dev, &mdev->priv_uar);
227err_pd:
228 mlx4_pd_free(dev, mdev->priv_pdn);
229err_free_dev:
230 kfree(mdev);
231err_free_res:
232 return NULL;
233}
234
235static struct mlx4_interface mlx4_en_interface = {
236 .add = mlx4_en_add,
237 .remove = mlx4_en_remove,
238 .event = mlx4_en_event,
239};
240
241static int __init mlx4_en_init(void)
242{
243 return mlx4_register_interface(&mlx4_en_interface);
244}
245
246static void __exit mlx4_en_cleanup(void)
247{
248 mlx4_unregister_interface(&mlx4_en_interface);
249}
250
251module_init(mlx4_en_init);
252module_exit(mlx4_en_cleanup);
253
diff --git a/drivers/net/mlx4/en_netdev.c b/drivers/net/mlx4/en_netdev.c
new file mode 100644
index 000000000000..96e709d6440a
--- /dev/null
+++ b/drivers/net/mlx4/en_netdev.c
@@ -0,0 +1,1088 @@
1/*
2 * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 *
32 */
33
34#include <linux/etherdevice.h>
35#include <linux/tcp.h>
36#include <linux/if_vlan.h>
37#include <linux/delay.h>
38
39#include <linux/mlx4/driver.h>
40#include <linux/mlx4/device.h>
41#include <linux/mlx4/cmd.h>
42#include <linux/mlx4/cq.h>
43
44#include "mlx4_en.h"
45#include "en_port.h"
46
47
48static void mlx4_en_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
49{
50 struct mlx4_en_priv *priv = netdev_priv(dev);
51 struct mlx4_en_dev *mdev = priv->mdev;
52 int err;
53
54 mlx4_dbg(HW, priv, "Registering VLAN group:%p\n", grp);
55 priv->vlgrp = grp;
56
57 mutex_lock(&mdev->state_lock);
58 if (mdev->device_up && priv->port_up) {
59 err = mlx4_SET_VLAN_FLTR(mdev->dev, priv->port, grp);
60 if (err)
61 mlx4_err(mdev, "Failed configuring VLAN filter\n");
62 }
63 mutex_unlock(&mdev->state_lock);
64}
65
66static void mlx4_en_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
67{
68 struct mlx4_en_priv *priv = netdev_priv(dev);
69 struct mlx4_en_dev *mdev = priv->mdev;
70 int err;
71
72 if (!priv->vlgrp)
73 return;
74
75 mlx4_dbg(HW, priv, "adding VLAN:%d (vlgrp entry:%p)\n",
76 vid, vlan_group_get_device(priv->vlgrp, vid));
77
78 /* Add VID to port VLAN filter */
79 mutex_lock(&mdev->state_lock);
80 if (mdev->device_up && priv->port_up) {
81 err = mlx4_SET_VLAN_FLTR(mdev->dev, priv->port, priv->vlgrp);
82 if (err)
83 mlx4_err(mdev, "Failed configuring VLAN filter\n");
84 }
85 mutex_unlock(&mdev->state_lock);
86}
87
88static void mlx4_en_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
89{
90 struct mlx4_en_priv *priv = netdev_priv(dev);
91 struct mlx4_en_dev *mdev = priv->mdev;
92 int err;
93
94 if (!priv->vlgrp)
95 return;
96
97 mlx4_dbg(HW, priv, "Killing VID:%d (vlgrp:%p vlgrp "
98 "entry:%p)\n", vid, priv->vlgrp,
99 vlan_group_get_device(priv->vlgrp, vid));
100 vlan_group_set_device(priv->vlgrp, vid, NULL);
101
102 /* Remove VID from port VLAN filter */
103 mutex_lock(&mdev->state_lock);
104 if (mdev->device_up && priv->port_up) {
105 err = mlx4_SET_VLAN_FLTR(mdev->dev, priv->port, priv->vlgrp);
106 if (err)
107 mlx4_err(mdev, "Failed configuring VLAN filter\n");
108 }
109 mutex_unlock(&mdev->state_lock);
110}
111
112static u64 mlx4_en_mac_to_u64(u8 *addr)
113{
114 u64 mac = 0;
115 int i;
116
117 for (i = 0; i < ETH_ALEN; i++) {
118 mac <<= 8;
119 mac |= addr[i];
120 }
121 return mac;
122}
123
124static int mlx4_en_set_mac(struct net_device *dev, void *addr)
125{
126 struct mlx4_en_priv *priv = netdev_priv(dev);
127 struct mlx4_en_dev *mdev = priv->mdev;
128 struct sockaddr *saddr = addr;
129
130 if (!is_valid_ether_addr(saddr->sa_data))
131 return -EADDRNOTAVAIL;
132
133 memcpy(dev->dev_addr, saddr->sa_data, ETH_ALEN);
134 priv->mac = mlx4_en_mac_to_u64(dev->dev_addr);
135 queue_work(mdev->workqueue, &priv->mac_task);
136 return 0;
137}
138
139static void mlx4_en_do_set_mac(struct work_struct *work)
140{
141 struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
142 mac_task);
143 struct mlx4_en_dev *mdev = priv->mdev;
144 int err = 0;
145
146 mutex_lock(&mdev->state_lock);
147 if (priv->port_up) {
148 /* Remove old MAC and insert the new one */
149 mlx4_unregister_mac(mdev->dev, priv->port, priv->mac_index);
150 err = mlx4_register_mac(mdev->dev, priv->port,
151 priv->mac, &priv->mac_index);
152 if (err)
153 mlx4_err(mdev, "Failed changing HW MAC address\n");
154 } else
155 mlx4_dbg(HW, priv, "Port is down, exiting...\n");
156
157 mutex_unlock(&mdev->state_lock);
158}
159
160static void mlx4_en_clear_list(struct net_device *dev)
161{
162 struct mlx4_en_priv *priv = netdev_priv(dev);
163 struct dev_mc_list *plist = priv->mc_list;
164 struct dev_mc_list *next;
165
166 while (plist) {
167 next = plist->next;
168 kfree(plist);
169 plist = next;
170 }
171 priv->mc_list = NULL;
172}
173
174static void mlx4_en_cache_mclist(struct net_device *dev)
175{
176 struct mlx4_en_priv *priv = netdev_priv(dev);
177 struct mlx4_en_dev *mdev = priv->mdev;
178 struct dev_mc_list *mclist;
179 struct dev_mc_list *tmp;
180 struct dev_mc_list *plist = NULL;
181
182 for (mclist = dev->mc_list; mclist; mclist = mclist->next) {
183 tmp = kmalloc(sizeof(struct dev_mc_list), GFP_ATOMIC);
184 if (!tmp) {
185 mlx4_err(mdev, "failed to allocate multicast list\n");
186 mlx4_en_clear_list(dev);
187 return;
188 }
189 memcpy(tmp, mclist, sizeof(struct dev_mc_list));
190 tmp->next = NULL;
191 if (plist)
192 plist->next = tmp;
193 else
194 priv->mc_list = tmp;
195 plist = tmp;
196 }
197}
198
199
200static void mlx4_en_set_multicast(struct net_device *dev)
201{
202 struct mlx4_en_priv *priv = netdev_priv(dev);
203
204 if (!priv->port_up)
205 return;
206
207 queue_work(priv->mdev->workqueue, &priv->mcast_task);
208}
209
210static void mlx4_en_do_set_multicast(struct work_struct *work)
211{
212 struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
213 mcast_task);
214 struct mlx4_en_dev *mdev = priv->mdev;
215 struct net_device *dev = priv->dev;
216 struct dev_mc_list *mclist;
217 u64 mcast_addr = 0;
218 int err;
219
220 mutex_lock(&mdev->state_lock);
221 if (!mdev->device_up) {
222 mlx4_dbg(HW, priv, "Card is not up, ignoring "
223 "multicast change.\n");
224 goto out;
225 }
226 if (!priv->port_up) {
227 mlx4_dbg(HW, priv, "Port is down, ignoring "
228 "multicast change.\n");
229 goto out;
230 }
231
232 /*
 233 * Promiscuous mode: disable all filters
234 */
235
236 if (dev->flags & IFF_PROMISC) {
237 if (!(priv->flags & MLX4_EN_FLAG_PROMISC)) {
238 if (netif_msg_rx_status(priv))
239 mlx4_warn(mdev, "Port:%d entering promiscuous mode\n",
240 priv->port);
241 priv->flags |= MLX4_EN_FLAG_PROMISC;
242
 243 /* Enable promiscuous mode */
244 err = mlx4_SET_PORT_qpn_calc(mdev->dev, priv->port,
245 priv->base_qpn, 1);
246 if (err)
247 mlx4_err(mdev, "Failed enabling "
248 "promiscous mode\n");
249
250 /* Disable port multicast filter (unconditionally) */
251 err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
252 0, MLX4_MCAST_DISABLE);
253 if (err)
254 mlx4_err(mdev, "Failed disabling "
255 "multicast filter\n");
256
257 /* Disable port VLAN filter */
258 err = mlx4_SET_VLAN_FLTR(mdev->dev, priv->port, NULL);
259 if (err)
260 mlx4_err(mdev, "Failed disabling "
261 "VLAN filter\n");
262 }
263 goto out;
264 }
265
266 /*
 267 * Not in promiscuous mode
268 */
269
270 if (priv->flags & MLX4_EN_FLAG_PROMISC) {
271 if (netif_msg_rx_status(priv))
272 mlx4_warn(mdev, "Port:%d leaving promiscuous mode\n",
273 priv->port);
274 priv->flags &= ~MLX4_EN_FLAG_PROMISC;
275
 276 /* Disable promiscuous mode */
277 err = mlx4_SET_PORT_qpn_calc(mdev->dev, priv->port,
278 priv->base_qpn, 0);
279 if (err)
280 mlx4_err(mdev, "Failed disabling promiscous mode\n");
281
282 /* Enable port VLAN filter */
283 err = mlx4_SET_VLAN_FLTR(mdev->dev, priv->port, priv->vlgrp);
284 if (err)
285 mlx4_err(mdev, "Failed enabling VLAN filter\n");
286 }
287
288 /* Enable/disable the multicast filter according to IFF_ALLMULTI */
289 if (dev->flags & IFF_ALLMULTI) {
290 err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
291 0, MLX4_MCAST_DISABLE);
292 if (err)
293 mlx4_err(mdev, "Failed disabling multicast filter\n");
294 } else {
295 err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
296 0, MLX4_MCAST_DISABLE);
297 if (err)
298 mlx4_err(mdev, "Failed disabling multicast filter\n");
299
300 /* Flush mcast filter and init it with broadcast address */
301 mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, ETH_BCAST,
302 1, MLX4_MCAST_CONFIG);
303
304 /* Update multicast list - we cache all addresses so they won't
 305 * change while HW is updated holding the command semaphore */
306 netif_tx_lock_bh(dev);
307 mlx4_en_cache_mclist(dev);
308 netif_tx_unlock_bh(dev);
309 for (mclist = priv->mc_list; mclist; mclist = mclist->next) {
310 mcast_addr = mlx4_en_mac_to_u64(mclist->dmi_addr);
311 mlx4_SET_MCAST_FLTR(mdev->dev, priv->port,
312 mcast_addr, 0, MLX4_MCAST_CONFIG);
313 }
314 err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
315 0, MLX4_MCAST_ENABLE);
316 if (err)
317 mlx4_err(mdev, "Failed enabling multicast filter\n");
318
319 mlx4_en_clear_list(dev);
320 }
321out:
322 mutex_unlock(&mdev->state_lock);
323}
324
325#ifdef CONFIG_NET_POLL_CONTROLLER
326static void mlx4_en_netpoll(struct net_device *dev)
327{
328 struct mlx4_en_priv *priv = netdev_priv(dev);
329 struct mlx4_en_cq *cq;
330 unsigned long flags;
331 int i;
332
333 for (i = 0; i < priv->rx_ring_num; i++) {
334 cq = &priv->rx_cq[i];
335 spin_lock_irqsave(&cq->lock, flags);
336 napi_synchronize(&cq->napi);
337 mlx4_en_process_rx_cq(dev, cq, 0);
338 spin_unlock_irqrestore(&cq->lock, flags);
339 }
340}
341#endif
342
343static void mlx4_en_tx_timeout(struct net_device *dev)
344{
345 struct mlx4_en_priv *priv = netdev_priv(dev);
346 struct mlx4_en_dev *mdev = priv->mdev;
347
348 if (netif_msg_timer(priv))
349 mlx4_warn(mdev, "Tx timeout called on port:%d\n", priv->port);
350
351 if (netif_carrier_ok(dev)) {
352 priv->port_stats.tx_timeout++;
353 mlx4_dbg(DRV, priv, "Scheduling watchdog\n");
354 queue_work(mdev->workqueue, &priv->watchdog_task);
355 }
356}
357
358
359static struct net_device_stats *mlx4_en_get_stats(struct net_device *dev)
360{
361 struct mlx4_en_priv *priv = netdev_priv(dev);
362
363 spin_lock_bh(&priv->stats_lock);
364 memcpy(&priv->ret_stats, &priv->stats, sizeof(priv->stats));
365 spin_unlock_bh(&priv->stats_lock);
366
367 return &priv->ret_stats;
368}
369
370static void mlx4_en_set_default_moderation(struct mlx4_en_priv *priv)
371{
372 struct mlx4_en_dev *mdev = priv->mdev;
373 struct mlx4_en_cq *cq;
374 int i;
375
376 /* If we haven't received a specific coalescing setting
 377 * (module param), we set the moderation parameters as follows:
378 * - moder_cnt is set to the number of mtu sized packets to
 379 * satisfy our coalescing target.
380 * - moder_time is set to a fixed value.
381 */
382 priv->rx_frames = (mdev->profile.rx_moder_cnt ==
383 MLX4_EN_AUTO_CONF) ?
384 MLX4_EN_RX_COAL_TARGET /
385 priv->dev->mtu + 1 :
386 mdev->profile.rx_moder_cnt;
387 priv->rx_usecs = (mdev->profile.rx_moder_time ==
388 MLX4_EN_AUTO_CONF) ?
389 MLX4_EN_RX_COAL_TIME :
390 mdev->profile.rx_moder_time;
391 mlx4_dbg(INTR, priv, "Default coalesing params for mtu:%d - "
392 "rx_frames:%d rx_usecs:%d\n",
393 priv->dev->mtu, priv->rx_frames, priv->rx_usecs);
394
395 /* Setup cq moderation params */
396 for (i = 0; i < priv->rx_ring_num; i++) {
397 cq = &priv->rx_cq[i];
398 cq->moder_cnt = priv->rx_frames;
399 cq->moder_time = priv->rx_usecs;
400 }
401
402 for (i = 0; i < priv->tx_ring_num; i++) {
403 cq = &priv->tx_cq[i];
404 cq->moder_cnt = MLX4_EN_TX_COAL_PKTS;
405 cq->moder_time = MLX4_EN_TX_COAL_TIME;
406 }
407
408 /* Reset auto-moderation params */
409 priv->pkt_rate_low = MLX4_EN_RX_RATE_LOW;
410 priv->rx_usecs_low = MLX4_EN_RX_COAL_TIME_LOW;
411 priv->pkt_rate_high = MLX4_EN_RX_RATE_HIGH;
412 priv->rx_usecs_high = MLX4_EN_RX_COAL_TIME_HIGH;
413 priv->sample_interval = MLX4_EN_SAMPLE_INTERVAL;
414 priv->adaptive_rx_coal = mdev->profile.auto_moder;
415 priv->last_moder_time = MLX4_EN_AUTO_CONF;
416 priv->last_moder_jiffies = 0;
417 priv->last_moder_packets = 0;
418 priv->last_moder_tx_packets = 0;
419 priv->last_moder_bytes = 0;
420}
421
422static void mlx4_en_auto_moderation(struct mlx4_en_priv *priv)
423{
424 unsigned long period = (unsigned long) (jiffies - priv->last_moder_jiffies);
425 struct mlx4_en_dev *mdev = priv->mdev;
426 struct mlx4_en_cq *cq;
427 unsigned long packets;
428 unsigned long rate;
429 unsigned long avg_pkt_size;
430 unsigned long rx_packets;
431 unsigned long rx_bytes;
432 unsigned long tx_packets;
433 unsigned long tx_pkt_diff;
434 unsigned long rx_pkt_diff;
435 int moder_time;
436 int i, err;
437
438 if (!priv->adaptive_rx_coal || period < priv->sample_interval * HZ)
439 return;
440
441 spin_lock_bh(&priv->stats_lock);
442 rx_packets = priv->stats.rx_packets;
443 rx_bytes = priv->stats.rx_bytes;
444 tx_packets = priv->stats.tx_packets;
445 spin_unlock_bh(&priv->stats_lock);
446
447 if (!priv->last_moder_jiffies || !period)
448 goto out;
449
450 tx_pkt_diff = ((unsigned long) (tx_packets -
451 priv->last_moder_tx_packets));
452 rx_pkt_diff = ((unsigned long) (rx_packets -
453 priv->last_moder_packets));
454 packets = max(tx_pkt_diff, rx_pkt_diff);
455 rate = packets * HZ / period;
456 avg_pkt_size = packets ? ((unsigned long) (rx_bytes -
457 priv->last_moder_bytes)) / packets : 0;
458
459 /* Apply auto-moderation only when packet rate exceeds a rate that
460 * it matters */
461 if (rate > MLX4_EN_RX_RATE_THRESH) {
462 /* If tx and rx packet rates are not balanced, assume that
463 * traffic is mainly BW bound and apply maximum moderation.
464 * Otherwise, moderate according to packet rate */
465 if (2 * tx_pkt_diff > 3 * rx_pkt_diff ||
466 2 * rx_pkt_diff > 3 * tx_pkt_diff) {
467 moder_time = priv->rx_usecs_high;
468 } else {
469 if (rate < priv->pkt_rate_low)
470 moder_time = priv->rx_usecs_low;
471 else if (rate > priv->pkt_rate_high)
472 moder_time = priv->rx_usecs_high;
473 else
474 moder_time = (rate - priv->pkt_rate_low) *
475 (priv->rx_usecs_high - priv->rx_usecs_low) /
476 (priv->pkt_rate_high - priv->pkt_rate_low) +
477 priv->rx_usecs_low;
478 }
479 } else {
480 /* When packet rate is low, use default moderation rather than
481 * 0 to prevent interrupt storms if traffic suddenly increases */
482 moder_time = priv->rx_usecs;
483 }
484
485 mlx4_dbg(INTR, priv, "tx rate:%lu rx_rate:%lu\n",
486 tx_pkt_diff * HZ / period, rx_pkt_diff * HZ / period);
487
488 mlx4_dbg(INTR, priv, "Rx moder_time changed from:%d to %d period:%lu "
489 "[jiff] packets:%lu avg_pkt_size:%lu rate:%lu [p/s])\n",
490 priv->last_moder_time, moder_time, period, packets,
491 avg_pkt_size, rate);
492
493 if (moder_time != priv->last_moder_time) {
494 priv->last_moder_time = moder_time;
495 for (i = 0; i < priv->rx_ring_num; i++) {
496 cq = &priv->rx_cq[i];
497 cq->moder_time = moder_time;
498 err = mlx4_en_set_cq_moder(priv, cq);
499 if (err) {
500 mlx4_err(mdev, "Failed modifying moderation for cq:%d "
501 "on port:%d\n", i, priv->port);
502 break;
503 }
504 }
505 }
506
507out:
508 priv->last_moder_packets = rx_packets;
509 priv->last_moder_tx_packets = tx_packets;
510 priv->last_moder_bytes = rx_bytes;
511 priv->last_moder_jiffies = jiffies;
512}
513
/* Periodic statistics worker (priv->stats_task).
 * Pulls HW counters via DUMP_ETH_STATS, runs adaptive interrupt
 * moderation while the port is up, and re-arms itself every STATS_DELAY
 * as long as the device is up. Self-rescheduling stops naturally once
 * mdev->device_up is cleared (under state_lock), so teardown only needs
 * cancel_delayed_work + flush_workqueue.
 */
static void mlx4_en_do_get_stats(struct work_struct *work)
{
	struct delayed_work *delay = container_of(work, struct delayed_work, work);
	struct mlx4_en_priv *priv = container_of(delay, struct mlx4_en_priv,
						 stats_task);
	struct mlx4_en_dev *mdev = priv->mdev;
	int err;

	/* reset=0: read counters without clearing them */
	err = mlx4_en_DUMP_ETH_STATS(mdev, priv->port, 0);
	if (err)
		mlx4_dbg(HW, priv, "Could not update stats for "
				   "port:%d\n", priv->port);

	mutex_lock(&mdev->state_lock);
	if (mdev->device_up) {
		if (priv->port_up)
			mlx4_en_auto_moderation(priv);

		/* Re-arm only while the device is up */
		queue_delayed_work(mdev->workqueue, &priv->stats_task, STATS_DELAY);
	}
	mutex_unlock(&mdev->state_lock);
}
536
/* Link-state worker (priv->linkstate_task), queued from the device's
 * port event handler. Compares the event-reported state against the
 * last state we acted on and, on a change, toggles the netdev carrier
 * and optionally logs it (gated by the ethtool msglevel link bit).
 */
static void mlx4_en_linkstate(struct work_struct *work)
{
	struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
						 linkstate_task);
	struct mlx4_en_dev *mdev = priv->mdev;
	int linkstate = priv->link_state;	/* snapshot; may be updated again by events */

	mutex_lock(&mdev->state_lock);
	/* If observable port state changed set carrier state and
	 * report to system log */
	if (priv->last_link_state != linkstate) {
		if (linkstate == MLX4_DEV_EVENT_PORT_DOWN) {
			if (netif_msg_link(priv))
				mlx4_info(mdev, "Port %d - link down\n", priv->port);
			netif_carrier_off(priv->dev);
		} else {
			if (netif_msg_link(priv))
				mlx4_info(mdev, "Port %d - link up\n", priv->port);
			netif_carrier_on(priv->dev);
		}
	}
	priv->last_link_state = linkstate;
	mutex_unlock(&mdev->state_lock);
}
561
562
563static int mlx4_en_start_port(struct net_device *dev)
564{
565 struct mlx4_en_priv *priv = netdev_priv(dev);
566 struct mlx4_en_dev *mdev = priv->mdev;
567 struct mlx4_en_cq *cq;
568 struct mlx4_en_tx_ring *tx_ring;
569 struct mlx4_en_rx_ring *rx_ring;
570 int rx_index = 0;
571 int tx_index = 0;
572 u16 stride;
573 int err = 0;
574 int i;
575 int j;
576
577 if (priv->port_up) {
578 mlx4_dbg(DRV, priv, "start port called while port already up\n");
579 return 0;
580 }
581
582 /* Calculate Rx buf size */
583 dev->mtu = min(dev->mtu, priv->max_mtu);
584 mlx4_en_calc_rx_buf(dev);
585 mlx4_dbg(DRV, priv, "Rx buf size:%d\n", priv->rx_skb_size);
586 stride = roundup_pow_of_two(sizeof(struct mlx4_en_rx_desc) +
587 DS_SIZE * priv->num_frags);
588 /* Configure rx cq's and rings */
589 for (i = 0; i < priv->rx_ring_num; i++) {
590 cq = &priv->rx_cq[i];
591 rx_ring = &priv->rx_ring[i];
592
593 err = mlx4_en_activate_cq(priv, cq);
594 if (err) {
595 mlx4_err(mdev, "Failed activating Rx CQ\n");
596 goto rx_err;
597 }
598 for (j = 0; j < cq->size; j++)
599 cq->buf[j].owner_sr_opcode = MLX4_CQE_OWNER_MASK;
600 err = mlx4_en_set_cq_moder(priv, cq);
601 if (err) {
602 mlx4_err(mdev, "Failed setting cq moderation parameters");
603 mlx4_en_deactivate_cq(priv, cq);
604 goto cq_err;
605 }
606 mlx4_en_arm_cq(priv, cq);
607
608 ++rx_index;
609 }
610
611 err = mlx4_en_activate_rx_rings(priv);
612 if (err) {
613 mlx4_err(mdev, "Failed to activate RX rings\n");
614 goto cq_err;
615 }
616
617 err = mlx4_en_config_rss_steer(priv);
618 if (err) {
619 mlx4_err(mdev, "Failed configuring rss steering\n");
620 goto rx_err;
621 }
622
623 /* Configure tx cq's and rings */
624 for (i = 0; i < priv->tx_ring_num; i++) {
625 /* Configure cq */
626 cq = &priv->tx_cq[i];
627 err = mlx4_en_activate_cq(priv, cq);
628 if (err) {
629 mlx4_err(mdev, "Failed allocating Tx CQ\n");
630 goto tx_err;
631 }
632 err = mlx4_en_set_cq_moder(priv, cq);
633 if (err) {
634 mlx4_err(mdev, "Failed setting cq moderation parameters");
635 mlx4_en_deactivate_cq(priv, cq);
636 goto tx_err;
637 }
638 mlx4_dbg(DRV, priv, "Resetting index of collapsed CQ:%d to -1\n", i);
639 cq->buf->wqe_index = cpu_to_be16(0xffff);
640
641 /* Configure ring */
642 tx_ring = &priv->tx_ring[i];
643 err = mlx4_en_activate_tx_ring(priv, tx_ring, cq->mcq.cqn,
644 priv->rx_ring[0].srq.srqn);
645 if (err) {
646 mlx4_err(mdev, "Failed allocating Tx ring\n");
647 mlx4_en_deactivate_cq(priv, cq);
648 goto tx_err;
649 }
650 /* Set initial ownership of all Tx TXBBs to SW (1) */
651 for (j = 0; j < tx_ring->buf_size; j += STAMP_STRIDE)
652 *((u32 *) (tx_ring->buf + j)) = 0xffffffff;
653 ++tx_index;
654 }
655
656 /* Configure port */
657 err = mlx4_SET_PORT_general(mdev->dev, priv->port,
658 priv->rx_skb_size + ETH_FCS_LEN,
659 priv->prof->tx_pause,
660 priv->prof->tx_ppp,
661 priv->prof->rx_pause,
662 priv->prof->rx_ppp);
663 if (err) {
664 mlx4_err(mdev, "Failed setting port general configurations"
665 " for port %d, with error %d\n", priv->port, err);
666 goto tx_err;
667 }
668 /* Set default qp number */
669 err = mlx4_SET_PORT_qpn_calc(mdev->dev, priv->port, priv->base_qpn, 0);
670 if (err) {
671 mlx4_err(mdev, "Failed setting default qp numbers\n");
672 goto tx_err;
673 }
674 /* Set port mac number */
675 mlx4_dbg(DRV, priv, "Setting mac for port %d\n", priv->port);
676 err = mlx4_register_mac(mdev->dev, priv->port,
677 priv->mac, &priv->mac_index);
678 if (err) {
679 mlx4_err(mdev, "Failed setting port mac\n");
680 goto tx_err;
681 }
682
683 /* Init port */
684 mlx4_dbg(HW, priv, "Initializing port\n");
685 err = mlx4_INIT_PORT(mdev->dev, priv->port);
686 if (err) {
687 mlx4_err(mdev, "Failed Initializing port\n");
688 goto mac_err;
689 }
690
691 /* Schedule multicast task to populate multicast list */
692 queue_work(mdev->workqueue, &priv->mcast_task);
693
694 priv->port_up = true;
695 netif_start_queue(dev);
696 return 0;
697
698mac_err:
699 mlx4_unregister_mac(mdev->dev, priv->port, priv->mac_index);
700tx_err:
701 while (tx_index--) {
702 mlx4_en_deactivate_tx_ring(priv, &priv->tx_ring[tx_index]);
703 mlx4_en_deactivate_cq(priv, &priv->tx_cq[tx_index]);
704 }
705
706 mlx4_en_release_rss_steer(priv);
707rx_err:
708 for (i = 0; i < priv->rx_ring_num; i++)
709 mlx4_en_deactivate_rx_ring(priv, &priv->rx_ring[i]);
710cq_err:
711 while (rx_index--)
712 mlx4_en_deactivate_cq(priv, &priv->rx_cq[rx_index]);
713
714 return err; /* need to close devices */
715}
716
717
/* Bring the port down, undoing mlx4_en_start_port(): stop the Tx queue,
 * clear port_up under the Tx lock so the xmit path sees it, close the
 * port in FW, then release Tx and Rx resources. Safe to call when the
 * port is already down (no-op).
 */
static void mlx4_en_stop_port(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	int i;

	if (!priv->port_up) {
		mlx4_dbg(DRV, priv, "stop port (%d) called while port already down\n",
			 priv->port);
		return;
	}
	netif_stop_queue(dev);

	/* Synchronize with tx routine: after this, xmit sees port_up == false */
	netif_tx_lock_bh(dev);
	priv->port_up = false;
	netif_tx_unlock_bh(dev);

	/* Close port */
	mlx4_CLOSE_PORT(mdev->dev, priv->port);

	/* Unregister Mac address for the port */
	mlx4_unregister_mac(mdev->dev, priv->port, priv->mac_index);

	/* Free TX Rings */
	for (i = 0; i < priv->tx_ring_num; i++) {
		mlx4_en_deactivate_tx_ring(priv, &priv->tx_ring[i]);
		mlx4_en_deactivate_cq(priv, &priv->tx_cq[i]);
	}
	/* Brief delay before reclaiming buffers — presumably to let
	 * in-flight Tx completions drain; TODO confirm rationale */
	msleep(10);

	for (i = 0; i < priv->tx_ring_num; i++)
		mlx4_en_free_tx_buf(dev, &priv->tx_ring[i]);

	/* Free RSS qps */
	mlx4_en_release_rss_steer(priv);

	/* Free RX Rings; wait for any scheduled NAPI poll on the ring's CQ
	 * to finish before deactivating the CQ */
	for (i = 0; i < priv->rx_ring_num; i++) {
		mlx4_en_deactivate_rx_ring(priv, &priv->rx_ring[i]);
		while (test_bit(NAPI_STATE_SCHED, &priv->rx_cq[i].napi.state))
			msleep(1);
		mlx4_en_deactivate_cq(priv, &priv->rx_cq[i]);
	}
}
763
/* Watchdog worker (priv->watchdog_task): restarts the port after a Tx
 * timeout or a failed reconfiguration by doing a full stop/start cycle.
 *
 * NOTE(review): unlike the other port-state paths this runs without
 * taking mdev->state_lock and without checking device_up/netif_running —
 * confirm the workqueue flushing in teardown makes this safe.
 */
static void mlx4_en_restart(struct work_struct *work)
{
	struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
						 watchdog_task);
	struct mlx4_en_dev *mdev = priv->mdev;
	struct net_device *dev = priv->dev;

	mlx4_dbg(DRV, priv, "Watchdog task called for port %d\n", priv->port);
	mlx4_en_stop_port(dev);
	if (mlx4_en_start_port(dev))
		mlx4_err(mdev, "Failed restarting port %d\n", priv->port);
}
776
777
/* ndo_open handler: reset HW and SW statistics, program default
 * interrupt moderation and start the port. Fails with -EBUSY if the
 * underlying device is down (e.g. mid-restart).
 *
 * Returns 0 on success or a negative errno.
 */
static int mlx4_en_open(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	int i;
	int err = 0;

	mutex_lock(&mdev->state_lock);

	if (!mdev->device_up) {
		mlx4_err(mdev, "Cannot open - device down/disabled\n");
		err = -EBUSY;
		goto out;
	}

	/* Reset HW statistics and performance counters (reset=1) */
	if (mlx4_en_DUMP_ETH_STATS(mdev, priv->port, 1))
		mlx4_dbg(HW, priv, "Failed dumping statistics\n");

	memset(&priv->stats, 0, sizeof(priv->stats));
	memset(&priv->pstats, 0, sizeof(priv->pstats));

	/* Zero per-ring SW counters as well */
	for (i = 0; i < priv->tx_ring_num; i++) {
		priv->tx_ring[i].bytes = 0;
		priv->tx_ring[i].packets = 0;
	}
	for (i = 0; i < priv->rx_ring_num; i++) {
		priv->rx_ring[i].bytes = 0;
		priv->rx_ring[i].packets = 0;
	}

	mlx4_en_set_default_moderation(priv);
	err = mlx4_en_start_port(dev);
	if (err)
		mlx4_err(mdev, "Failed starting port:%d\n", priv->port);

out:
	mutex_unlock(&mdev->state_lock);
	return err;
}
818
819
820static int mlx4_en_close(struct net_device *dev)
821{
822 struct mlx4_en_priv *priv = netdev_priv(dev);
823 struct mlx4_en_dev *mdev = priv->mdev;
824
825 if (netif_msg_ifdown(priv))
826 mlx4_info(mdev, "Close called for port:%d\n", priv->port);
827
828 mutex_lock(&mdev->state_lock);
829
830 mlx4_en_stop_port(dev);
831 netif_carrier_off(dev);
832
833 mutex_unlock(&mdev->state_lock);
834 return 0;
835}
836
837static void mlx4_en_free_resources(struct mlx4_en_priv *priv)
838{
839 int i;
840
841 for (i = 0; i < priv->tx_ring_num; i++) {
842 if (priv->tx_ring[i].tx_info)
843 mlx4_en_destroy_tx_ring(priv, &priv->tx_ring[i]);
844 if (priv->tx_cq[i].buf)
845 mlx4_en_destroy_cq(priv, &priv->tx_cq[i]);
846 }
847
848 for (i = 0; i < priv->rx_ring_num; i++) {
849 if (priv->rx_ring[i].rx_info)
850 mlx4_en_destroy_rx_ring(priv, &priv->rx_ring[i]);
851 if (priv->rx_cq[i].buf)
852 mlx4_en_destroy_cq(priv, &priv->rx_cq[i]);
853 }
854}
855
/* Create all Tx and Rx CQ/ring pairs according to the port profile.
 * On failure returns -ENOMEM and leaves any partially created objects
 * in place; the caller (mlx4_en_init_netdev error path) cleans them up
 * via mlx4_en_destroy_netdev -> mlx4_en_free_resources.
 */
static int mlx4_en_alloc_resources(struct mlx4_en_priv *priv)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_en_port_profile *prof = priv->prof;
	int i;

	/* Create tx Rings */
	for (i = 0; i < priv->tx_ring_num; i++) {
		if (mlx4_en_create_cq(priv, &priv->tx_cq[i],
				      prof->tx_ring_size, i, TX))
			goto err;

		if (mlx4_en_create_tx_ring(priv, &priv->tx_ring[i],
					   prof->tx_ring_size, TXBB_SIZE))
			goto err;
	}

	/* Create rx Rings */
	for (i = 0; i < priv->rx_ring_num; i++) {
		if (mlx4_en_create_cq(priv, &priv->rx_cq[i],
				      prof->rx_ring_size, i, RX))
			goto err;

		if (mlx4_en_create_rx_ring(priv, &priv->rx_ring[i],
					   prof->rx_ring_size, priv->stride))
			goto err;
	}

	return 0;

err:
	/* Underlying error codes are collapsed to -ENOMEM; failures here
	 * are allocation failures */
	mlx4_err(mdev, "Failed to allocate NIC resources\n");
	return -ENOMEM;
}
890
891
/* Tear down a port netdev created by mlx4_en_init_netdev. Also used as
 * the error-path cleanup for a partially initialized netdev, hence the
 * 'registered'/'allocated' guards. Unregistering first closes the port
 * if it was up; the workqueue flush ensures no task touches the netdev
 * after it is detached from mdev->pndev[].
 */
void mlx4_en_destroy_netdev(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;

	mlx4_dbg(DRV, priv, "Destroying netdev on port:%d\n", priv->port);

	/* Unregister device - this will close the port if it was up */
	if (priv->registered)
		unregister_netdev(dev);

	if (priv->allocated)
		mlx4_free_hwq_res(mdev->dev, &priv->res, MLX4_EN_PAGE_SIZE);

	cancel_delayed_work(&priv->stats_task);
	cancel_delayed_work(&priv->refill_task);
	/* flush any pending task for this netdev */
	flush_workqueue(mdev->workqueue);

	/* Detach the netdev so tasks would not attempt to access it */
	mutex_lock(&mdev->state_lock);
	mdev->pndev[priv->port] = NULL;
	mutex_unlock(&mdev->state_lock);

	mlx4_en_free_resources(priv);
	free_netdev(dev);
}
919
920static int mlx4_en_change_mtu(struct net_device *dev, int new_mtu)
921{
922 struct mlx4_en_priv *priv = netdev_priv(dev);
923 struct mlx4_en_dev *mdev = priv->mdev;
924 int err = 0;
925
926 mlx4_dbg(DRV, priv, "Change MTU called - current:%d new:%d\n",
927 dev->mtu, new_mtu);
928
929 if ((new_mtu < MLX4_EN_MIN_MTU) || (new_mtu > priv->max_mtu)) {
930 mlx4_err(mdev, "Bad MTU size:%d.\n", new_mtu);
931 return -EPERM;
932 }
933 dev->mtu = new_mtu;
934
935 if (netif_running(dev)) {
936 mutex_lock(&mdev->state_lock);
937 if (!mdev->device_up) {
938 /* NIC is probably restarting - let watchdog task reset
939 * the port */
940 mlx4_dbg(DRV, priv, "Change MTU called with card down!?\n");
941 } else {
942 mlx4_en_stop_port(dev);
943 mlx4_en_set_default_moderation(priv);
944 err = mlx4_en_start_port(dev);
945 if (err) {
946 mlx4_err(mdev, "Failed restarting port:%d\n",
947 priv->port);
948 queue_work(mdev->workqueue, &priv->watchdog_task);
949 }
950 }
951 mutex_unlock(&mdev->state_lock);
952 }
953 return 0;
954}
955
/* Allocate and register the netdev for one physical port: set up private
 * state and work items, validate the burned-in MAC, allocate rings/CQs
 * and the HW queue page, wire up netdev ops and features, then register
 * with the networking core and kick off the stats task.
 *
 * Returns 0 on success; on failure everything allocated so far is
 * released through mlx4_en_destroy_netdev (guarded by the 'registered'
 * and 'allocated' flags) and a negative errno is returned.
 */
int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
			struct mlx4_en_port_profile *prof)
{
	struct net_device *dev;
	struct mlx4_en_priv *priv;
	int i;
	int err;

	dev = alloc_etherdev(sizeof(struct mlx4_en_priv));
	if (dev == NULL) {
		mlx4_err(mdev, "Net device allocation failed\n");
		return -ENOMEM;
	}

	SET_NETDEV_DEV(dev, &mdev->dev->pdev->dev);

	/*
	 * Initialize driver private data
	 */

	priv = netdev_priv(dev);
	memset(priv, 0, sizeof(struct mlx4_en_priv));
	priv->dev = dev;
	priv->mdev = mdev;
	priv->prof = prof;
	priv->port = port;
	priv->port_up = false;
	priv->rx_csum = 1;	/* Rx checksum offload enabled by default */
	priv->flags = prof->flags;
	priv->tx_ring_num = prof->tx_ring_num;
	priv->rx_ring_num = prof->rx_ring_num;
	priv->mc_list = NULL;
	priv->mac_index = -1;	/* no MAC registered yet */
	priv->msg_enable = MLX4_EN_MSG_LEVEL;
	spin_lock_init(&priv->stats_lock);
	INIT_WORK(&priv->mcast_task, mlx4_en_do_set_multicast);
	INIT_WORK(&priv->mac_task, mlx4_en_do_set_mac);
	INIT_DELAYED_WORK(&priv->refill_task, mlx4_en_rx_refill);
	INIT_WORK(&priv->watchdog_task, mlx4_en_restart);
	INIT_WORK(&priv->linkstate_task, mlx4_en_linkstate);
	INIT_DELAYED_WORK(&priv->stats_task, mlx4_en_do_get_stats);

	/* Query for default mac and max mtu */
	priv->max_mtu = mdev->dev->caps.eth_mtu_cap[priv->port];
	priv->mac = mdev->dev->caps.def_mac[priv->port];
	if (ILLEGAL_MAC(priv->mac)) {
		mlx4_err(mdev, "Port: %d, invalid mac burned: 0x%llx, quiting\n",
			 priv->port, priv->mac);
		err = -EINVAL;
		goto out;
	}

	/* Rx descriptor stride sized for the worst-case fragment count */
	priv->stride = roundup_pow_of_two(sizeof(struct mlx4_en_rx_desc) +
					  DS_SIZE * MLX4_EN_MAX_RX_FRAGS);
	err = mlx4_en_alloc_resources(priv);
	if (err)
		goto out;

	/* Populate Rx default RSS mappings */
	mlx4_en_set_default_rss_map(priv, &priv->rss_map, priv->rx_ring_num *
						RSS_FACTOR, priv->rx_ring_num);
	/* Allocate page for receive rings */
	err = mlx4_alloc_hwq_res(mdev->dev, &priv->res,
				 MLX4_EN_PAGE_SIZE, MLX4_EN_PAGE_SIZE);
	if (err) {
		mlx4_err(mdev, "Failed to allocate page for rx qps\n");
		goto out;
	}
	priv->allocated = 1;

	/* Populate Tx priority mappings */
	mlx4_en_set_prio_map(priv, priv->tx_prio_map, prof->tx_ring_num);

	/*
	 * Initialize netdev entry points
	 */

	dev->open = &mlx4_en_open;
	dev->stop = &mlx4_en_close;
	dev->hard_start_xmit = &mlx4_en_xmit;
	dev->get_stats = &mlx4_en_get_stats;
	dev->set_multicast_list = &mlx4_en_set_multicast;
	dev->set_mac_address = &mlx4_en_set_mac;
	dev->change_mtu = &mlx4_en_change_mtu;
	dev->tx_timeout = &mlx4_en_tx_timeout;
	dev->watchdog_timeo = MLX4_EN_WATCHDOG_TIMEOUT;
	dev->vlan_rx_register = mlx4_en_vlan_rx_register;
	dev->vlan_rx_add_vid = mlx4_en_vlan_rx_add_vid;
	dev->vlan_rx_kill_vid = mlx4_en_vlan_rx_kill_vid;
#ifdef CONFIG_NET_POLL_CONTROLLER
	dev->poll_controller = mlx4_en_netpoll;
#endif
	SET_ETHTOOL_OPS(dev, &mlx4_en_ethtool_ops);

	/* Set default MAC: priv->mac holds it as a 64-bit big-endian-style
	 * value, lowest byte last */
	dev->addr_len = ETH_ALEN;
	for (i = 0; i < ETH_ALEN; i++)
		dev->dev_addr[ETH_ALEN - 1 - i] =
		(u8) (priv->mac >> (8 * i));

	/*
	 * Set driver features
	 */
	dev->features |= NETIF_F_SG;
	dev->features |= NETIF_F_HW_CSUM;
	dev->features |= NETIF_F_HIGHDMA;
	dev->features |= NETIF_F_HW_VLAN_TX |
			 NETIF_F_HW_VLAN_RX |
			 NETIF_F_HW_VLAN_FILTER;
	if (mdev->profile.num_lro)
		dev->features |= NETIF_F_LRO;
	if (mdev->LSO_support) {
		dev->features |= NETIF_F_TSO;
		dev->features |= NETIF_F_TSO6;
	}

	mdev->pndev[port] = dev;

	netif_carrier_off(dev);
	err = register_netdev(dev);
	if (err) {
		mlx4_err(mdev, "Netdev registration failed\n");
		goto out;
	}
	priv->registered = 1;
	queue_delayed_work(mdev->workqueue, &priv->stats_task, STATS_DELAY);
	return 0;

out:
	mlx4_en_destroy_netdev(dev);
	return err;
}
1088
diff --git a/drivers/net/mlx4/en_params.c b/drivers/net/mlx4/en_params.c
new file mode 100644
index 000000000000..95706ee1c019
--- /dev/null
+++ b/drivers/net/mlx4/en_params.c
@@ -0,0 +1,482 @@
1/*
2 * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 *
32 */
33
34#include <linux/kernel.h>
35#include <linux/ethtool.h>
36#include <linux/netdevice.h>
37
38#include "mlx4_en.h"
39#include "en_port.h"
40
/* Declare a read-only (mode 0444) unsigned-int module parameter with a
 * default value and a description string. */
#define MLX4_EN_PARM_INT(X, def_val, desc) \
	static unsigned int X = def_val;\
	module_param(X , uint, 0444); \
	MODULE_PARM_DESC(X, desc);


/*
 * Device scope module parameters
 */


/* Use a XOR rather than Toeplitz hash function for RSS */
MLX4_EN_PARM_INT(rss_xor, 0, "Use XOR hash function for RSS");

/* RSS hash type mask - default to <saddr, daddr, sport, dport> */
MLX4_EN_PARM_INT(rss_mask, 0xf, "RSS hash type bitmask");

/* Number of LRO sessions per Rx ring (rounded up to a power of two) */
MLX4_EN_PARM_INT(num_lro, MLX4_EN_MAX_LRO_DESCRIPTORS,
		 "Number of LRO sessions per ring or disabled (0)");

/* Priority pausing */
MLX4_EN_PARM_INT(pptx, MLX4_EN_DEF_TX_PAUSE,
		 "Pause policy on TX: 0 never generate pause frames "
		 "1 generate pause frames according to RX buffer threshold");
MLX4_EN_PARM_INT(pprx, MLX4_EN_DEF_RX_PAUSE,
		 "Pause policy on RX: 0 ignore received pause frames "
		 "1 respect received pause frames");
MLX4_EN_PARM_INT(pfctx, 0, "Priority based Flow Control policy on TX[7:0]."
			   " Per priority bit mask");
MLX4_EN_PARM_INT(pfcrx, 0, "Priority based Flow Control policy on RX[7:0]."
			   " Per priority bit mask");

/* Interrupt moderation tuning */
MLX4_EN_PARM_INT(rx_moder_cnt, MLX4_EN_AUTO_CONF,
	   "Max coalesced descriptors for Rx interrupt moderation");
MLX4_EN_PARM_INT(rx_moder_time, MLX4_EN_AUTO_CONF,
	   "Timeout following last packet for Rx interrupt moderation");
MLX4_EN_PARM_INT(auto_moder, 1, "Enable dynamic interrupt moderation");

MLX4_EN_PARM_INT(rx_ring_num1, 0, "Number or Rx rings for port 1 (0 = #cores)");
MLX4_EN_PARM_INT(rx_ring_num2, 0, "Number or Rx rings for port 2 (0 = #cores)");

MLX4_EN_PARM_INT(tx_ring_size1, MLX4_EN_AUTO_CONF, "Tx ring size for port 1");
MLX4_EN_PARM_INT(tx_ring_size2, MLX4_EN_AUTO_CONF, "Tx ring size for port 2");
MLX4_EN_PARM_INT(rx_ring_size1, MLX4_EN_AUTO_CONF, "Rx ring size for port 1");
MLX4_EN_PARM_INT(rx_ring_size2, MLX4_EN_AUTO_CONF, "Rx ring size for port 2");
88
89
/* Translate module parameters into the device-wide profile: moderation
 * settings, RSS configuration, and per-port pause/ring parameters.
 * Ring sizes are clamped to their minimums and rounded up to a power of
 * two; MLX4_EN_AUTO_CONF selects the compiled-in defaults.
 * Always returns 0.
 */
int mlx4_en_get_profile(struct mlx4_en_dev *mdev)
{
	struct mlx4_en_profile *params = &mdev->profile;
	int i;

	/* AUTO_CONF acts as the upper bound; values above it are clamped */
	params->rx_moder_cnt = min_t(int, rx_moder_cnt, MLX4_EN_AUTO_CONF);
	params->rx_moder_time = min_t(int, rx_moder_time, MLX4_EN_AUTO_CONF);
	params->auto_moder = auto_moder;
	params->rss_xor = (rss_xor != 0);
	params->rss_mask = rss_mask & 0x1f;
	params->num_lro = min_t(int, num_lro , MLX4_EN_MAX_LRO_DESCRIPTORS);
	/* prof[] is indexed by 1-based port number */
	for (i = 1; i <= MLX4_MAX_PORTS; i++) {
		params->prof[i].rx_pause = pprx;
		params->prof[i].rx_ppp = pfcrx;
		params->prof[i].tx_pause = pptx;
		params->prof[i].tx_ppp = pfctx;
	}
	/* Multiple Tx rings are only useful with per-priority flow control */
	if (pfcrx || pfctx) {
		params->prof[1].tx_ring_num = MLX4_EN_TX_RING_NUM;
		params->prof[2].tx_ring_num = MLX4_EN_TX_RING_NUM;
	} else {
		params->prof[1].tx_ring_num = 1;
		params->prof[2].tx_ring_num = 1;
	}
	params->prof[1].rx_ring_num = min_t(int, rx_ring_num1, MAX_RX_RINGS);
	params->prof[2].rx_ring_num = min_t(int, rx_ring_num2, MAX_RX_RINGS);

	if (tx_ring_size1 == MLX4_EN_AUTO_CONF)
		tx_ring_size1 = MLX4_EN_DEF_TX_RING_SIZE;
	params->prof[1].tx_ring_size =
		(tx_ring_size1 < MLX4_EN_MIN_TX_SIZE) ?
		 MLX4_EN_MIN_TX_SIZE : roundup_pow_of_two(tx_ring_size1);

	if (tx_ring_size2 == MLX4_EN_AUTO_CONF)
		tx_ring_size2 = MLX4_EN_DEF_TX_RING_SIZE;
	params->prof[2].tx_ring_size =
		(tx_ring_size2 < MLX4_EN_MIN_TX_SIZE) ?
		 MLX4_EN_MIN_TX_SIZE : roundup_pow_of_two(tx_ring_size2);

	if (rx_ring_size1 == MLX4_EN_AUTO_CONF)
		rx_ring_size1 = MLX4_EN_DEF_RX_RING_SIZE;
	params->prof[1].rx_ring_size =
		(rx_ring_size1 < MLX4_EN_MIN_RX_SIZE) ?
		 MLX4_EN_MIN_RX_SIZE : roundup_pow_of_two(rx_ring_size1);

	if (rx_ring_size2 == MLX4_EN_AUTO_CONF)
		rx_ring_size2 = MLX4_EN_DEF_RX_RING_SIZE;
	params->prof[2].rx_ring_size =
		(rx_ring_size2 < MLX4_EN_MIN_RX_SIZE) ?
		 MLX4_EN_MIN_RX_SIZE : roundup_pow_of_two(rx_ring_size2);
	return 0;
}
142
143
144/*
145 * Ethtool support
146 */
147
148static void mlx4_en_update_lro_stats(struct mlx4_en_priv *priv)
149{
150 int i;
151
152 priv->port_stats.lro_aggregated = 0;
153 priv->port_stats.lro_flushed = 0;
154 priv->port_stats.lro_no_desc = 0;
155
156 for (i = 0; i < priv->rx_ring_num; i++) {
157 priv->port_stats.lro_aggregated += priv->rx_ring[i].lro.stats.aggregated;
158 priv->port_stats.lro_flushed += priv->rx_ring[i].lro.stats.flushed;
159 priv->port_stats.lro_no_desc += priv->rx_ring[i].lro.stats.no_desc;
160 }
161}
162
163static void
164mlx4_en_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo)
165{
166 struct mlx4_en_priv *priv = netdev_priv(dev);
167 struct mlx4_en_dev *mdev = priv->mdev;
168
169 sprintf(drvinfo->driver, DRV_NAME " (%s)", mdev->dev->board_id);
170 strncpy(drvinfo->version, DRV_VERSION " (" DRV_RELDATE ")", 32);
171 sprintf(drvinfo->fw_version, "%d.%d.%d",
172 (u16) (mdev->dev->caps.fw_ver >> 32),
173 (u16) ((mdev->dev->caps.fw_ver >> 16) & 0xffff),
174 (u16) (mdev->dev->caps.fw_ver & 0xffff));
175 strncpy(drvinfo->bus_info, pci_name(mdev->dev->pdev), 32);
176 drvinfo->n_stats = 0;
177 drvinfo->regdump_len = 0;
178 drvinfo->eedump_len = 0;
179}
180
181static u32 mlx4_en_get_tso(struct net_device *dev)
182{
183 return (dev->features & NETIF_F_TSO) != 0;
184}
185
186static int mlx4_en_set_tso(struct net_device *dev, u32 data)
187{
188 struct mlx4_en_priv *priv = netdev_priv(dev);
189
190 if (data) {
191 if (!priv->mdev->LSO_support)
192 return -EPERM;
193 dev->features |= (NETIF_F_TSO | NETIF_F_TSO6);
194 } else
195 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6);
196 return 0;
197}
198
199static u32 mlx4_en_get_rx_csum(struct net_device *dev)
200{
201 struct mlx4_en_priv *priv = netdev_priv(dev);
202 return priv->rx_csum;
203}
204
205static int mlx4_en_set_rx_csum(struct net_device *dev, u32 data)
206{
207 struct mlx4_en_priv *priv = netdev_priv(dev);
208 priv->rx_csum = (data != 0);
209 return 0;
210}
211
/* Names reported via ethtool -S, in the exact order the values are
 * written by mlx4_en_get_ethtool_stats(): net_device_stats fields,
 * then port statistics, then per-priority packet statistics.
 * NOTE(review): NUM_ALL_STATS includes NUM_PERF_STATS, but
 * get_ethtool_stats() below does not emit perf-stat values — verify the
 * counts agree (presumably NUM_PERF_STATS is 0 in this configuration).
 */
static const char main_strings[][ETH_GSTRING_LEN] = {
	"rx_packets", "tx_packets", "rx_bytes", "tx_bytes", "rx_errors",
	"tx_errors", "rx_dropped", "tx_dropped", "multicast", "collisions",
	"rx_length_errors", "rx_over_errors", "rx_crc_errors",
	"rx_frame_errors", "rx_fifo_errors", "rx_missed_errors",
	"tx_aborted_errors", "tx_carrier_errors", "tx_fifo_errors",
	"tx_heartbeat_errors", "tx_window_errors",

	/* port statistics */
	"lro_aggregated", "lro_flushed", "lro_no_desc", "tso_packets",
	"queue_stopped", "wake_queue", "tx_timeout", "rx_alloc_failed",
	"rx_csum_good", "rx_csum_none", "tx_chksum_offload",

	/* packet statistics */
	"broadcast", "rx_prio_0", "rx_prio_1", "rx_prio_2", "rx_prio_3",
	"rx_prio_4", "rx_prio_5", "rx_prio_6", "rx_prio_7", "tx_prio_0",
	"tx_prio_1", "tx_prio_2", "tx_prio_3", "tx_prio_4", "tx_prio_5",
	"tx_prio_6", "tx_prio_7",
};
/* Count of net_device_stats entries at the head of main_strings */
#define NUM_MAIN_STATS	21
#define NUM_ALL_STATS	(NUM_MAIN_STATS + NUM_PORT_STATS + NUM_PKT_STATS + NUM_PERF_STATS)
233
234static u32 mlx4_en_get_msglevel(struct net_device *dev)
235{
236 return ((struct mlx4_en_priv *) netdev_priv(dev))->msg_enable;
237}
238
239static void mlx4_en_set_msglevel(struct net_device *dev, u32 val)
240{
241 ((struct mlx4_en_priv *) netdev_priv(dev))->msg_enable = val;
242}
243
244static void mlx4_en_get_wol(struct net_device *netdev,
245 struct ethtool_wolinfo *wol)
246{
247 wol->supported = 0;
248 wol->wolopts = 0;
249
250 return;
251}
252
253static int mlx4_en_get_sset_count(struct net_device *dev, int sset)
254{
255 struct mlx4_en_priv *priv = netdev_priv(dev);
256
257 if (sset != ETH_SS_STATS)
258 return -EOPNOTSUPP;
259
260 return NUM_ALL_STATS + (priv->tx_ring_num + priv->rx_ring_num) * 2;
261}
262
/* ethtool get_ethtool_stats: copy counters out in the order declared in
 * main_strings. Holds stats_lock against the periodic stats worker.
 * NOTE(review): the struct-to-array casts assume priv->stats,
 * priv->port_stats and priv->pkstats are laid out as packed arrays of
 * unsigned long — confirm against their definitions (on 32-bit this
 * would truncate any u64 fields).
 */
static void mlx4_en_get_ethtool_stats(struct net_device *dev,
		struct ethtool_stats *stats, uint64_t *data)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	int index = 0;
	int i;

	spin_lock_bh(&priv->stats_lock);

	/* Refresh the aggregated LRO counters before copying them out */
	mlx4_en_update_lro_stats(priv);

	for (i = 0; i < NUM_MAIN_STATS; i++)
		data[index++] = ((unsigned long *) &priv->stats)[i];
	for (i = 0; i < NUM_PORT_STATS; i++)
		data[index++] = ((unsigned long *) &priv->port_stats)[i];
	for (i = 0; i < priv->tx_ring_num; i++) {
		data[index++] = priv->tx_ring[i].packets;
		data[index++] = priv->tx_ring[i].bytes;
	}
	for (i = 0; i < priv->rx_ring_num; i++) {
		data[index++] = priv->rx_ring[i].packets;
		data[index++] = priv->rx_ring[i].bytes;
	}
	for (i = 0; i < NUM_PKT_STATS; i++)
		data[index++] = ((unsigned long *) &priv->pkstats)[i];
	spin_unlock_bh(&priv->stats_lock);

}
291
292static void mlx4_en_get_strings(struct net_device *dev,
293 uint32_t stringset, uint8_t *data)
294{
295 struct mlx4_en_priv *priv = netdev_priv(dev);
296 int index = 0;
297 int i;
298
299 if (stringset != ETH_SS_STATS)
300 return;
301
302 /* Add main counters */
303 for (i = 0; i < NUM_MAIN_STATS; i++)
304 strcpy(data + (index++) * ETH_GSTRING_LEN, main_strings[i]);
305 for (i = 0; i < NUM_PORT_STATS; i++)
306 strcpy(data + (index++) * ETH_GSTRING_LEN,
307 main_strings[i + NUM_MAIN_STATS]);
308 for (i = 0; i < priv->tx_ring_num; i++) {
309 sprintf(data + (index++) * ETH_GSTRING_LEN,
310 "tx%d_packets", i);
311 sprintf(data + (index++) * ETH_GSTRING_LEN,
312 "tx%d_bytes", i);
313 }
314 for (i = 0; i < priv->rx_ring_num; i++) {
315 sprintf(data + (index++) * ETH_GSTRING_LEN,
316 "rx%d_packets", i);
317 sprintf(data + (index++) * ETH_GSTRING_LEN,
318 "rx%d_bytes", i);
319 }
320 for (i = 0; i < NUM_PKT_STATS; i++)
321 strcpy(data + (index++) * ETH_GSTRING_LEN,
322 main_strings[i + NUM_MAIN_STATS + NUM_PORT_STATS]);
323}
324
325static int mlx4_en_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
326{
327 cmd->autoneg = AUTONEG_DISABLE;
328 cmd->supported = SUPPORTED_10000baseT_Full;
329 cmd->advertising = SUPPORTED_10000baseT_Full;
330 if (netif_carrier_ok(dev)) {
331 cmd->speed = SPEED_10000;
332 cmd->duplex = DUPLEX_FULL;
333 } else {
334 cmd->speed = -1;
335 cmd->duplex = -1;
336 }
337 return 0;
338}
339
340static int mlx4_en_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
341{
342 if ((cmd->autoneg == AUTONEG_ENABLE) ||
343 (cmd->speed != SPEED_10000) || (cmd->duplex != DUPLEX_FULL))
344 return -EINVAL;
345
346 /* Nothing to change */
347 return 0;
348}
349
350static int mlx4_en_get_coalesce(struct net_device *dev,
351 struct ethtool_coalesce *coal)
352{
353 struct mlx4_en_priv *priv = netdev_priv(dev);
354
355 coal->tx_coalesce_usecs = 0;
356 coal->tx_max_coalesced_frames = 0;
357 coal->rx_coalesce_usecs = priv->rx_usecs;
358 coal->rx_max_coalesced_frames = priv->rx_frames;
359
360 coal->pkt_rate_low = priv->pkt_rate_low;
361 coal->rx_coalesce_usecs_low = priv->rx_usecs_low;
362 coal->pkt_rate_high = priv->pkt_rate_high;
363 coal->rx_coalesce_usecs_high = priv->rx_usecs_high;
364 coal->rate_sample_interval = priv->sample_interval;
365 coal->use_adaptive_rx_coalesce = priv->adaptive_rx_coal;
366 return 0;
367}
368
/* ethtool set_coalesce: store Rx moderation parameters and, unless
 * adaptive moderation is enabled, push the static values to every Rx CQ
 * immediately (the adaptive path applies them from the stats worker).
 */
static int mlx4_en_set_coalesce(struct net_device *dev,
			      struct ethtool_coalesce *coal)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	int err, i;

	/* AUTO_CONF selects frames-per-interrupt sized to the MTU:
	 * (MLX4_EN_RX_COAL_TARGET / mtu) + 1 — note '/' binds before '+' */
	priv->rx_frames = (coal->rx_max_coalesced_frames ==
			   MLX4_EN_AUTO_CONF) ?
				MLX4_EN_RX_COAL_TARGET /
				priv->dev->mtu + 1 :
				coal->rx_max_coalesced_frames;
	priv->rx_usecs = (coal->rx_coalesce_usecs ==
			  MLX4_EN_AUTO_CONF) ?
				MLX4_EN_RX_COAL_TIME :
				coal->rx_coalesce_usecs;

	/* Set adaptive coalescing params */
	priv->pkt_rate_low = coal->pkt_rate_low;
	priv->rx_usecs_low = coal->rx_coalesce_usecs_low;
	priv->pkt_rate_high = coal->pkt_rate_high;
	priv->rx_usecs_high = coal->rx_coalesce_usecs_high;
	priv->sample_interval = coal->rate_sample_interval;
	priv->adaptive_rx_coal = coal->use_adaptive_rx_coalesce;
	/* Force the auto-moderation worker to re-apply on its next run */
	priv->last_moder_time = MLX4_EN_AUTO_CONF;
	if (priv->adaptive_rx_coal)
		return 0;

	for (i = 0; i < priv->rx_ring_num; i++) {
		priv->rx_cq[i].moder_cnt = priv->rx_frames;
		priv->rx_cq[i].moder_time = priv->rx_usecs;
		err = mlx4_en_set_cq_moder(priv, &priv->rx_cq[i]);
		if (err)
			return err;
	}
	return 0;
}
405
406static int mlx4_en_set_pauseparam(struct net_device *dev,
407 struct ethtool_pauseparam *pause)
408{
409 struct mlx4_en_priv *priv = netdev_priv(dev);
410 struct mlx4_en_dev *mdev = priv->mdev;
411 int err;
412
413 priv->prof->tx_pause = pause->tx_pause != 0;
414 priv->prof->rx_pause = pause->rx_pause != 0;
415 err = mlx4_SET_PORT_general(mdev->dev, priv->port,
416 priv->rx_skb_size + ETH_FCS_LEN,
417 priv->prof->tx_pause,
418 priv->prof->tx_ppp,
419 priv->prof->rx_pause,
420 priv->prof->rx_ppp);
421 if (err)
422 mlx4_err(mdev, "Failed setting pause params to\n");
423
424 return err;
425}
426
427static void mlx4_en_get_pauseparam(struct net_device *dev,
428 struct ethtool_pauseparam *pause)
429{
430 struct mlx4_en_priv *priv = netdev_priv(dev);
431
432 pause->tx_pause = priv->prof->tx_pause;
433 pause->rx_pause = priv->prof->rx_pause;
434}
435
/* ethtool get_ringparam: report configured ring sizes from the port
 * profile. Ring resizing is not implemented (no set_ringparam).
 * NOTE(review): the *_max_pending fields are filled from
 * caps.max_rq_sg/max_sq_sg (scatter/gather entry limits), which looks
 * like the wrong capability for a maximum ring size — confirm.
 */
static void mlx4_en_get_ringparam(struct net_device *dev,
				  struct ethtool_ringparam *param)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;

	memset(param, 0, sizeof(*param));
	param->rx_max_pending = mdev->dev->caps.max_rq_sg;
	param->tx_max_pending = mdev->dev->caps.max_sq_sg;
	param->rx_pending = mdev->profile.prof[priv->port].rx_ring_size;
	param->tx_pending = mdev->profile.prof[priv->port].tx_ring_size;
}
448
/* ethtool operations table for mlx4_en netdevs; installed by
 * mlx4_en_init_netdev via SET_ETHTOOL_OPS. Tx checksum offload is
 * advertised for IPv4+IPv6 (ethtool_op_set_tx_ipv6_csum). */
const struct ethtool_ops mlx4_en_ethtool_ops = {
	.get_drvinfo = mlx4_en_get_drvinfo,
	.get_settings = mlx4_en_get_settings,
	.set_settings = mlx4_en_set_settings,
#ifdef NETIF_F_TSO
	.get_tso = mlx4_en_get_tso,
	.set_tso = mlx4_en_set_tso,
#endif
	.get_sg = ethtool_op_get_sg,
	.set_sg = ethtool_op_set_sg,
	.get_link = ethtool_op_get_link,
	.get_rx_csum = mlx4_en_get_rx_csum,
	.set_rx_csum = mlx4_en_set_rx_csum,
	.get_tx_csum = ethtool_op_get_tx_csum,
	.set_tx_csum = ethtool_op_set_tx_ipv6_csum,
	.get_strings = mlx4_en_get_strings,
	.get_sset_count = mlx4_en_get_sset_count,
	.get_ethtool_stats = mlx4_en_get_ethtool_stats,
	.get_wol = mlx4_en_get_wol,
	.get_msglevel = mlx4_en_get_msglevel,
	.set_msglevel = mlx4_en_set_msglevel,
	.get_coalesce = mlx4_en_get_coalesce,
	.set_coalesce = mlx4_en_set_coalesce,
	.get_pauseparam = mlx4_en_get_pauseparam,
	.set_pauseparam = mlx4_en_set_pauseparam,
	.get_ringparam = mlx4_en_get_ringparam,
	.get_flags = ethtool_op_get_flags,
	.set_flags = ethtool_op_set_flags,
};
478
479
480
481
482
diff --git a/drivers/net/mlx4/en_port.c b/drivers/net/mlx4/en_port.c
new file mode 100644
index 000000000000..c5a4c0389752
--- /dev/null
+++ b/drivers/net/mlx4/en_port.c
@@ -0,0 +1,261 @@
1/*
2 * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 *
32 */
33
34
35#include <linux/if_vlan.h>
36
37#include <linux/mlx4/device.h>
38#include <linux/mlx4/cmd.h>
39
40#include "en_port.h"
41#include "mlx4_en.h"
42
43
44int mlx4_SET_MCAST_FLTR(struct mlx4_dev *dev, u8 port,
45 u64 mac, u64 clear, u8 mode)
46{
47 return mlx4_cmd(dev, (mac | (clear << 63)), port, mode,
48 MLX4_CMD_SET_MCAST_FLTR, MLX4_CMD_TIME_CLASS_B);
49}
50
51int mlx4_SET_VLAN_FLTR(struct mlx4_dev *dev, u8 port, struct vlan_group *grp)
52{
53 struct mlx4_cmd_mailbox *mailbox;
54 struct mlx4_set_vlan_fltr_mbox *filter;
55 int i;
56 int j;
57 int index = 0;
58 u32 entry;
59 int err = 0;
60
61 mailbox = mlx4_alloc_cmd_mailbox(dev);
62 if (IS_ERR(mailbox))
63 return PTR_ERR(mailbox);
64
65 filter = mailbox->buf;
66 if (grp) {
67 memset(filter, 0, sizeof *filter);
68 for (i = VLAN_FLTR_SIZE - 1; i >= 0; i--) {
69 entry = 0;
70 for (j = 0; j < 32; j++)
71 if (vlan_group_get_device(grp, index++))
72 entry |= 1 << j;
73 filter->entry[i] = cpu_to_be32(entry);
74 }
75 } else {
76 /* When no vlans are configured we block all vlans */
77 memset(filter, 0, sizeof(*filter));
78 }
79 err = mlx4_cmd(dev, mailbox->dma, port, 0, MLX4_CMD_SET_VLAN_FLTR,
80 MLX4_CMD_TIME_CLASS_B);
81 mlx4_free_cmd_mailbox(dev, mailbox);
82 return err;
83}
84
85
/*
 * Configure general port parameters (MTU and flow control) via the
 * SET_PORT command with the GENERAL context.
 *
 * @pptx/@pprx:   global pause enable for TX/RX
 * @pfctx/@pfcrx: per-priority flow-control bitmasks for TX/RX
 *
 * Returns 0 on success or a negative errno from mailbox allocation or
 * the firmware command.
 */
int mlx4_SET_PORT_general(struct mlx4_dev *dev, u8 port, int mtu,
			  u8 pptx, u8 pfctx, u8 pprx, u8 pfcrx)
{
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_set_port_general_context *context;
	int err;
	u32 in_mod;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);
	context = mailbox->buf;
	memset(context, 0, sizeof *context);

	context->flags = SET_PORT_GEN_ALL_VALID;
	context->mtu = cpu_to_be16(mtu);
	/* Global pause lives in bit 7 and is forced off whenever the
	 * corresponding per-priority mask is non-zero: multiplying by
	 * !pfctx zeroes pptx when pfctx is set. */
	context->pptx = (pptx * (!pfctx)) << 7;
	context->pfctx = pfctx;
	context->pprx = (pprx * (!pfcrx)) << 7;
	context->pfcrx = pfcrx;

	/* input modifier: context type in bits 15:8, port number in 7:0 */
	in_mod = MLX4_SET_PORT_GENERAL << 8 | port;
	err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT,
		       MLX4_CMD_TIME_CLASS_B);

	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}
114
115int mlx4_SET_PORT_qpn_calc(struct mlx4_dev *dev, u8 port, u32 base_qpn,
116 u8 promisc)
117{
118 struct mlx4_cmd_mailbox *mailbox;
119 struct mlx4_set_port_rqp_calc_context *context;
120 int err;
121 u32 in_mod;
122
123 mailbox = mlx4_alloc_cmd_mailbox(dev);
124 if (IS_ERR(mailbox))
125 return PTR_ERR(mailbox);
126 context = mailbox->buf;
127 memset(context, 0, sizeof *context);
128
129 context->base_qpn = cpu_to_be32(base_qpn);
130 context->promisc = cpu_to_be32(promisc << SET_PORT_PROMISC_SHIFT | base_qpn);
131 context->mcast = cpu_to_be32(1 << SET_PORT_PROMISC_SHIFT | base_qpn);
132 context->intra_no_vlan = 0;
133 context->no_vlan = MLX4_NO_VLAN_IDX;
134 context->intra_vlan_miss = 0;
135 context->vlan_miss = MLX4_VLAN_MISS_IDX;
136
137 in_mod = MLX4_SET_PORT_RQP_CALC << 8 | port;
138 err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT,
139 MLX4_CMD_TIME_CLASS_B);
140
141 mlx4_free_cmd_mailbox(dev, mailbox);
142 return err;
143}
144
145
/*
 * Query the hardware ethernet counters for @port via DUMP_ETH_STATS and
 * fold them into the netdev stats and the driver's per-priority packet
 * stats.
 *
 * @reset: placed in bits 15:8 of the input modifier; presumably a
 *         non-zero value clears the counters after reading --
 *         NOTE(review): inferred from the argument name, confirm
 *         against the firmware spec.
 *
 * Returns 0 on success or a negative errno from mailbox allocation or
 * the firmware command.
 */
int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
{
	struct mlx4_en_stat_out_mbox *mlx4_en_stats;
	struct mlx4_en_priv *priv = netdev_priv(mdev->pndev[port]);
	struct net_device_stats *stats = &priv->stats;
	struct mlx4_cmd_mailbox *mailbox;
	u64 in_mod = reset << 8 | port;	/* reset flag in 15:8, port in 7:0 */
	int err;

	mailbox = mlx4_alloc_cmd_mailbox(mdev->dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);
	memset(mailbox->buf, 0, sizeof(*mlx4_en_stats));
	err = mlx4_cmd_box(mdev->dev, 0, mailbox->dma, in_mod, 0,
			   MLX4_CMD_DUMP_ETH_STATS, MLX4_CMD_TIME_CLASS_B);
	if (err)
		goto out;

	mlx4_en_stats = mailbox->buf;

	/* Publish under the stats lock so readers see a consistent snapshot */
	spin_lock_bh(&priv->stats_lock);

	/* RX packets: total good frames minus dropped ones */
	stats->rx_packets = be32_to_cpu(mlx4_en_stats->RTOTFRMS) -
			    be32_to_cpu(mlx4_en_stats->RDROP);
	/* TX packets/bytes: sum every priority plus untagged and loopback */
	stats->tx_packets = be64_to_cpu(mlx4_en_stats->TTOT_prio_0) +
			    be64_to_cpu(mlx4_en_stats->TTOT_prio_1) +
			    be64_to_cpu(mlx4_en_stats->TTOT_prio_2) +
			    be64_to_cpu(mlx4_en_stats->TTOT_prio_3) +
			    be64_to_cpu(mlx4_en_stats->TTOT_prio_4) +
			    be64_to_cpu(mlx4_en_stats->TTOT_prio_5) +
			    be64_to_cpu(mlx4_en_stats->TTOT_prio_6) +
			    be64_to_cpu(mlx4_en_stats->TTOT_prio_7) +
			    be64_to_cpu(mlx4_en_stats->TTOT_novlan) +
			    be64_to_cpu(mlx4_en_stats->TTOT_loopbk);
	stats->rx_bytes = be64_to_cpu(mlx4_en_stats->ROCT_prio_0) +
			  be64_to_cpu(mlx4_en_stats->ROCT_prio_1) +
			  be64_to_cpu(mlx4_en_stats->ROCT_prio_2) +
			  be64_to_cpu(mlx4_en_stats->ROCT_prio_3) +
			  be64_to_cpu(mlx4_en_stats->ROCT_prio_4) +
			  be64_to_cpu(mlx4_en_stats->ROCT_prio_5) +
			  be64_to_cpu(mlx4_en_stats->ROCT_prio_6) +
			  be64_to_cpu(mlx4_en_stats->ROCT_prio_7) +
			  be64_to_cpu(mlx4_en_stats->ROCT_novlan);

	stats->tx_bytes = be64_to_cpu(mlx4_en_stats->TTTLOCT_prio_0) +
			  be64_to_cpu(mlx4_en_stats->TTTLOCT_prio_1) +
			  be64_to_cpu(mlx4_en_stats->TTTLOCT_prio_2) +
			  be64_to_cpu(mlx4_en_stats->TTTLOCT_prio_3) +
			  be64_to_cpu(mlx4_en_stats->TTTLOCT_prio_4) +
			  be64_to_cpu(mlx4_en_stats->TTTLOCT_prio_5) +
			  be64_to_cpu(mlx4_en_stats->TTTLOCT_prio_6) +
			  be64_to_cpu(mlx4_en_stats->TTTLOCT_prio_7) +
			  be64_to_cpu(mlx4_en_stats->TTTLOCT_novlan) +
			  be64_to_cpu(mlx4_en_stats->TTTLOCT_loopbk);

	/* RX errors: PCS, length, jabber, CRC and runt counters combined */
	stats->rx_errors = be64_to_cpu(mlx4_en_stats->PCS) +
			   be32_to_cpu(mlx4_en_stats->RdropLength) +
			   be32_to_cpu(mlx4_en_stats->RJBBR) +
			   be32_to_cpu(mlx4_en_stats->RCRC) +
			   be32_to_cpu(mlx4_en_stats->RRUNT);
	stats->tx_errors = be32_to_cpu(mlx4_en_stats->TDROP);
	stats->multicast = be64_to_cpu(mlx4_en_stats->MCAST_prio_0) +
			   be64_to_cpu(mlx4_en_stats->MCAST_prio_1) +
			   be64_to_cpu(mlx4_en_stats->MCAST_prio_2) +
			   be64_to_cpu(mlx4_en_stats->MCAST_prio_3) +
			   be64_to_cpu(mlx4_en_stats->MCAST_prio_4) +
			   be64_to_cpu(mlx4_en_stats->MCAST_prio_5) +
			   be64_to_cpu(mlx4_en_stats->MCAST_prio_6) +
			   be64_to_cpu(mlx4_en_stats->MCAST_prio_7) +
			   be64_to_cpu(mlx4_en_stats->MCAST_novlan);
	stats->collisions = 0;
	stats->rx_length_errors = be32_to_cpu(mlx4_en_stats->RdropLength);
	/* Overflow drops are reported under three standard counters */
	stats->rx_over_errors = be32_to_cpu(mlx4_en_stats->RdropOvflw);
	stats->rx_crc_errors = be32_to_cpu(mlx4_en_stats->RCRC);
	stats->rx_frame_errors = 0;
	stats->rx_fifo_errors = be32_to_cpu(mlx4_en_stats->RdropOvflw);
	stats->rx_missed_errors = be32_to_cpu(mlx4_en_stats->RdropOvflw);
	/* The hardware does not report these TX error classes */
	stats->tx_aborted_errors = 0;
	stats->tx_carrier_errors = 0;
	stats->tx_fifo_errors = 0;
	stats->tx_heartbeat_errors = 0;
	stats->tx_window_errors = 0;

	priv->pkstats.broadcast =
				be64_to_cpu(mlx4_en_stats->RBCAST_prio_0) +
				be64_to_cpu(mlx4_en_stats->RBCAST_prio_1) +
				be64_to_cpu(mlx4_en_stats->RBCAST_prio_2) +
				be64_to_cpu(mlx4_en_stats->RBCAST_prio_3) +
				be64_to_cpu(mlx4_en_stats->RBCAST_prio_4) +
				be64_to_cpu(mlx4_en_stats->RBCAST_prio_5) +
				be64_to_cpu(mlx4_en_stats->RBCAST_prio_6) +
				be64_to_cpu(mlx4_en_stats->RBCAST_prio_7) +
				be64_to_cpu(mlx4_en_stats->RBCAST_novlan);
	/* Per-priority packet counters exported via ethtool stats */
	priv->pkstats.rx_prio[0] = be64_to_cpu(mlx4_en_stats->RTOT_prio_0);
	priv->pkstats.rx_prio[1] = be64_to_cpu(mlx4_en_stats->RTOT_prio_1);
	priv->pkstats.rx_prio[2] = be64_to_cpu(mlx4_en_stats->RTOT_prio_2);
	priv->pkstats.rx_prio[3] = be64_to_cpu(mlx4_en_stats->RTOT_prio_3);
	priv->pkstats.rx_prio[4] = be64_to_cpu(mlx4_en_stats->RTOT_prio_4);
	priv->pkstats.rx_prio[5] = be64_to_cpu(mlx4_en_stats->RTOT_prio_5);
	priv->pkstats.rx_prio[6] = be64_to_cpu(mlx4_en_stats->RTOT_prio_6);
	priv->pkstats.rx_prio[7] = be64_to_cpu(mlx4_en_stats->RTOT_prio_7);
	priv->pkstats.tx_prio[0] = be64_to_cpu(mlx4_en_stats->TTOT_prio_0);
	priv->pkstats.tx_prio[1] = be64_to_cpu(mlx4_en_stats->TTOT_prio_1);
	priv->pkstats.tx_prio[2] = be64_to_cpu(mlx4_en_stats->TTOT_prio_2);
	priv->pkstats.tx_prio[3] = be64_to_cpu(mlx4_en_stats->TTOT_prio_3);
	priv->pkstats.tx_prio[4] = be64_to_cpu(mlx4_en_stats->TTOT_prio_4);
	priv->pkstats.tx_prio[5] = be64_to_cpu(mlx4_en_stats->TTOT_prio_5);
	priv->pkstats.tx_prio[6] = be64_to_cpu(mlx4_en_stats->TTOT_prio_6);
	priv->pkstats.tx_prio[7] = be64_to_cpu(mlx4_en_stats->TTOT_prio_7);
	spin_unlock_bh(&priv->stats_lock);

out:
	mlx4_free_cmd_mailbox(mdev->dev, mailbox);
	return err;
}
261
diff --git a/drivers/net/mlx4/en_port.h b/drivers/net/mlx4/en_port.h
new file mode 100644
index 000000000000..e6477f12beb5
--- /dev/null
+++ b/drivers/net/mlx4/en_port.h
@@ -0,0 +1,570 @@
1/*
2 * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 *
32 */
33
34#ifndef _MLX4_EN_PORT_H_
35#define _MLX4_EN_PORT_H_
36
37
/* SET_PORT general context: mark the MTU, pptx and pprx fields valid */
#define SET_PORT_GEN_ALL_VALID	0x7
/* Bit position of the enable flag in the promisc/mcast context words */
#define SET_PORT_PROMISC_SHIFT	31

/* Ethernet-specific firmware command opcodes */
enum {
	MLX4_CMD_SET_VLAN_FLTR  = 0x47,
	MLX4_CMD_SET_MCAST_FLTR = 0x48,
	MLX4_CMD_DUMP_ETH_STATS = 0x49,
};
46
/*
 * Mailbox layout for SET_PORT with the GENERAL context.  Multi-byte
 * fields are big-endian on the wire; reserved fields are sent as zero.
 * The layout mirrors the firmware format and must not be reordered.
 */
struct mlx4_set_port_general_context {
	u8 reserved[3];
	u8 flags;		/* SET_PORT_GEN_ALL_VALID */
	u16 reserved2;
	__be16 mtu;
	u8 pptx;		/* global TX pause enable, bit 7 */
	u8 pfctx;		/* per-priority TX pause bitmask */
	u16 reserved3;
	u8 pprx;		/* global RX pause enable, bit 7 */
	u8 pfcrx;		/* per-priority RX pause bitmask */
	u16 reserved4;
};
59
/*
 * Mailbox layout for SET_PORT with the RQP_CALC context: RX QP steering
 * configuration.  Multi-byte fields are big-endian on the wire; the
 * layout mirrors the firmware format and must not be reordered.
 */
struct mlx4_set_port_rqp_calc_context {
	__be32 base_qpn;	/* first QPN of the port's RX QP range */
	__be32 flags;
	u8 reserved[3];
	u8 mac_miss;
	u8 intra_no_vlan;
	u8 no_vlan;		/* steering index for untagged frames */
	u8 intra_vlan_miss;
	u8 vlan_miss;		/* steering index on VLAN filter miss */
	u8 reserved2[3];
	u8 no_vlan_prio;
	__be32 promisc;		/* enable in bit 31, base QPN in low bits */
	__be32 mcast;		/* enable in bit 31, base QPN in low bits */
};
74
/* The VLAN filter is a 4096-bit bitmap: 128 big-endian 32-bit words,
 * one bit per VLAN id (bit set = VLAN allowed). */
#define VLAN_FLTR_SIZE	128
struct mlx4_set_vlan_fltr_mbox {
	__be32 entry[VLAN_FLTR_SIZE];
};
79
80
/* Modifier values for the SET_MCAST_FLTR command */
enum {
	MLX4_MCAST_CONFIG       = 0,
	MLX4_MCAST_DISABLE      = 1,
	MLX4_MCAST_ENABLE       = 2,
};
86
87
/*
 * Mailbox layout returned by the DUMP_ETH_STATS command: the full set of
 * per-port hardware ethernet counters.  All fields are big-endian.
 * Most counters are bucketed per 802.1p priority (0-7) plus "novlan"
 * (untagged) and, on the transmit side, "loopbk" (loopback).  The field
 * order is the firmware wire format and must not be changed.
 */
struct mlx4_en_stat_out_mbox {
	/* Received frames with a length of exactly 64 octets */
	__be64 R64_prio_0;
	__be64 R64_prio_1;
	__be64 R64_prio_2;
	__be64 R64_prio_3;
	__be64 R64_prio_4;
	__be64 R64_prio_5;
	__be64 R64_prio_6;
	__be64 R64_prio_7;
	__be64 R64_novlan;
	/* Received frames with a length of 65 to 127 octets */
	__be64 R127_prio_0;
	__be64 R127_prio_1;
	__be64 R127_prio_2;
	__be64 R127_prio_3;
	__be64 R127_prio_4;
	__be64 R127_prio_5;
	__be64 R127_prio_6;
	__be64 R127_prio_7;
	__be64 R127_novlan;
	/* Received frames with a length of 128 to 255 octets */
	__be64 R255_prio_0;
	__be64 R255_prio_1;
	__be64 R255_prio_2;
	__be64 R255_prio_3;
	__be64 R255_prio_4;
	__be64 R255_prio_5;
	__be64 R255_prio_6;
	__be64 R255_prio_7;
	__be64 R255_novlan;
	/* Received frames with a length of 256 to 511 octets */
	__be64 R511_prio_0;
	__be64 R511_prio_1;
	__be64 R511_prio_2;
	__be64 R511_prio_3;
	__be64 R511_prio_4;
	__be64 R511_prio_5;
	__be64 R511_prio_6;
	__be64 R511_prio_7;
	__be64 R511_novlan;
	/* Received frames with a length of 512 to 1023 octets */
	__be64 R1023_prio_0;
	__be64 R1023_prio_1;
	__be64 R1023_prio_2;
	__be64 R1023_prio_3;
	__be64 R1023_prio_4;
	__be64 R1023_prio_5;
	__be64 R1023_prio_6;
	__be64 R1023_prio_7;
	__be64 R1023_novlan;
	/* Received frames with a length of 1024 to 1518 octets */
	__be64 R1518_prio_0;
	__be64 R1518_prio_1;
	__be64 R1518_prio_2;
	__be64 R1518_prio_3;
	__be64 R1518_prio_4;
	__be64 R1518_prio_5;
	__be64 R1518_prio_6;
	__be64 R1518_prio_7;
	__be64 R1518_novlan;
	/* Received frames with a length of 1519 to 1522 octets */
	__be64 R1522_prio_0;
	__be64 R1522_prio_1;
	__be64 R1522_prio_2;
	__be64 R1522_prio_3;
	__be64 R1522_prio_4;
	__be64 R1522_prio_5;
	__be64 R1522_prio_6;
	__be64 R1522_prio_7;
	__be64 R1522_novlan;
	/* Received frames with a length of 1523 to 1548 octets */
	__be64 R1548_prio_0;
	__be64 R1548_prio_1;
	__be64 R1548_prio_2;
	__be64 R1548_prio_3;
	__be64 R1548_prio_4;
	__be64 R1548_prio_5;
	__be64 R1548_prio_6;
	__be64 R1548_prio_7;
	__be64 R1548_novlan;
	/* Received frames with a length of 1548 < octets < MTU */
	__be64 R2MTU_prio_0;
	__be64 R2MTU_prio_1;
	__be64 R2MTU_prio_2;
	__be64 R2MTU_prio_3;
	__be64 R2MTU_prio_4;
	__be64 R2MTU_prio_5;
	__be64 R2MTU_prio_6;
	__be64 R2MTU_prio_7;
	__be64 R2MTU_novlan;
	/* Received frames with a length of MTU < octets and good CRC */
	__be64 RGIANT_prio_0;
	__be64 RGIANT_prio_1;
	__be64 RGIANT_prio_2;
	__be64 RGIANT_prio_3;
	__be64 RGIANT_prio_4;
	__be64 RGIANT_prio_5;
	__be64 RGIANT_prio_6;
	__be64 RGIANT_prio_7;
	__be64 RGIANT_novlan;
	/* Received broadcast frames with good CRC */
	__be64 RBCAST_prio_0;
	__be64 RBCAST_prio_1;
	__be64 RBCAST_prio_2;
	__be64 RBCAST_prio_3;
	__be64 RBCAST_prio_4;
	__be64 RBCAST_prio_5;
	__be64 RBCAST_prio_6;
	__be64 RBCAST_prio_7;
	__be64 RBCAST_novlan;
	/* Received multicast frames with good CRC */
	__be64 MCAST_prio_0;
	__be64 MCAST_prio_1;
	__be64 MCAST_prio_2;
	__be64 MCAST_prio_3;
	__be64 MCAST_prio_4;
	__be64 MCAST_prio_5;
	__be64 MCAST_prio_6;
	__be64 MCAST_prio_7;
	__be64 MCAST_novlan;
	/* Received unicast not short or GIANT frames with good CRC */
	__be64 RTOTG_prio_0;
	__be64 RTOTG_prio_1;
	__be64 RTOTG_prio_2;
	__be64 RTOTG_prio_3;
	__be64 RTOTG_prio_4;
	__be64 RTOTG_prio_5;
	__be64 RTOTG_prio_6;
	__be64 RTOTG_prio_7;
	__be64 RTOTG_novlan;

	/* Count of total octets of received frames, includes framing
	   characters */
	__be64 RTTLOCT_prio_0;
	/* Count of total octets of received frames, not including framing
	   characters */
	__be64 RTTLOCT_NOFRM_prio_0;
	/* Count of Total number of octets received
	   (only for frames without errors) */
	__be64 ROCT_prio_0;

	__be64 RTTLOCT_prio_1;
	__be64 RTTLOCT_NOFRM_prio_1;
	__be64 ROCT_prio_1;

	__be64 RTTLOCT_prio_2;
	__be64 RTTLOCT_NOFRM_prio_2;
	__be64 ROCT_prio_2;

	__be64 RTTLOCT_prio_3;
	__be64 RTTLOCT_NOFRM_prio_3;
	__be64 ROCT_prio_3;

	__be64 RTTLOCT_prio_4;
	__be64 RTTLOCT_NOFRM_prio_4;
	__be64 ROCT_prio_4;

	__be64 RTTLOCT_prio_5;
	__be64 RTTLOCT_NOFRM_prio_5;
	__be64 ROCT_prio_5;

	__be64 RTTLOCT_prio_6;
	__be64 RTTLOCT_NOFRM_prio_6;
	__be64 ROCT_prio_6;

	__be64 RTTLOCT_prio_7;
	__be64 RTTLOCT_NOFRM_prio_7;
	__be64 ROCT_prio_7;

	__be64 RTTLOCT_novlan;
	__be64 RTTLOCT_NOFRM_novlan;
	__be64 ROCT_novlan;

	/* Count of Total received frames including bad frames */
	__be64 RTOT_prio_0;
	/* Count of Total number of received frames with 802.1Q
	   encapsulation */
	__be64 R1Q_prio_0;
	__be64 reserved1;

	__be64 RTOT_prio_1;
	__be64 R1Q_prio_1;
	__be64 reserved2;

	__be64 RTOT_prio_2;
	__be64 R1Q_prio_2;
	__be64 reserved3;

	__be64 RTOT_prio_3;
	__be64 R1Q_prio_3;
	__be64 reserved4;

	__be64 RTOT_prio_4;
	__be64 R1Q_prio_4;
	__be64 reserved5;

	__be64 RTOT_prio_5;
	__be64 R1Q_prio_5;
	__be64 reserved6;

	__be64 RTOT_prio_6;
	__be64 R1Q_prio_6;
	__be64 reserved7;

	__be64 RTOT_prio_7;
	__be64 R1Q_prio_7;
	__be64 reserved8;

	__be64 RTOT_novlan;
	__be64 R1Q_novlan;
	__be64 reserved9;

	/* Total number of Successfully Received Control Frames */
	__be64 RCNTL;
	__be64 reserved10;
	__be64 reserved11;
	__be64 reserved12;
	/* Count of received frames with a length/type field value between 46
	   (42 for VLAN-tagged frames) and 1500 (also 1500 for VLAN-tagged
	   frames), inclusive */
	__be64 RInRangeLengthErr;
	/* Count of received frames with length/type field between 1501 and
	   1535 decimal, inclusive */
	__be64 ROutRangeLengthErr;
	/* Count of received frames that are longer than max allowed size for
	   802.3 frames (1518/1522) */
	__be64 RFrmTooLong;
	/* Count frames received with PCS error */
	__be64 PCS;

	/* Transmit frames with a length of exactly 64 octets */
	__be64 T64_prio_0;
	__be64 T64_prio_1;
	__be64 T64_prio_2;
	__be64 T64_prio_3;
	__be64 T64_prio_4;
	__be64 T64_prio_5;
	__be64 T64_prio_6;
	__be64 T64_prio_7;
	__be64 T64_novlan;
	__be64 T64_loopbk;
	/* Transmit frames with a length of 65 to 127 octets */
	__be64 T127_prio_0;
	__be64 T127_prio_1;
	__be64 T127_prio_2;
	__be64 T127_prio_3;
	__be64 T127_prio_4;
	__be64 T127_prio_5;
	__be64 T127_prio_6;
	__be64 T127_prio_7;
	__be64 T127_novlan;
	__be64 T127_loopbk;
	/* Transmit frames with a length of 128 to 255 octets */
	__be64 T255_prio_0;
	__be64 T255_prio_1;
	__be64 T255_prio_2;
	__be64 T255_prio_3;
	__be64 T255_prio_4;
	__be64 T255_prio_5;
	__be64 T255_prio_6;
	__be64 T255_prio_7;
	__be64 T255_novlan;
	__be64 T255_loopbk;
	/* Transmit frames with a length of 256 to 511 octets */
	__be64 T511_prio_0;
	__be64 T511_prio_1;
	__be64 T511_prio_2;
	__be64 T511_prio_3;
	__be64 T511_prio_4;
	__be64 T511_prio_5;
	__be64 T511_prio_6;
	__be64 T511_prio_7;
	__be64 T511_novlan;
	__be64 T511_loopbk;
	/* Transmit frames with a length of 512 to 1023 octets */
	__be64 T1023_prio_0;
	__be64 T1023_prio_1;
	__be64 T1023_prio_2;
	__be64 T1023_prio_3;
	__be64 T1023_prio_4;
	__be64 T1023_prio_5;
	__be64 T1023_prio_6;
	__be64 T1023_prio_7;
	__be64 T1023_novlan;
	__be64 T1023_loopbk;
	/* Transmit frames with a length of 1024 to 1518 octets */
	__be64 T1518_prio_0;
	__be64 T1518_prio_1;
	__be64 T1518_prio_2;
	__be64 T1518_prio_3;
	__be64 T1518_prio_4;
	__be64 T1518_prio_5;
	__be64 T1518_prio_6;
	__be64 T1518_prio_7;
	__be64 T1518_novlan;
	__be64 T1518_loopbk;
	/* Counts transmit frames with a length of 1519 to 1522 bytes */
	__be64 T1522_prio_0;
	__be64 T1522_prio_1;
	__be64 T1522_prio_2;
	__be64 T1522_prio_3;
	__be64 T1522_prio_4;
	__be64 T1522_prio_5;
	__be64 T1522_prio_6;
	__be64 T1522_prio_7;
	__be64 T1522_novlan;
	__be64 T1522_loopbk;
	/* Transmit frames with a length of 1523 to 1548 octets */
	__be64 T1548_prio_0;
	__be64 T1548_prio_1;
	__be64 T1548_prio_2;
	__be64 T1548_prio_3;
	__be64 T1548_prio_4;
	__be64 T1548_prio_5;
	__be64 T1548_prio_6;
	__be64 T1548_prio_7;
	__be64 T1548_novlan;
	__be64 T1548_loopbk;
	/* Counts transmit frames with a length of 1549 to MTU bytes */
	__be64 T2MTU_prio_0;
	__be64 T2MTU_prio_1;
	__be64 T2MTU_prio_2;
	__be64 T2MTU_prio_3;
	__be64 T2MTU_prio_4;
	__be64 T2MTU_prio_5;
	__be64 T2MTU_prio_6;
	__be64 T2MTU_prio_7;
	__be64 T2MTU_novlan;
	__be64 T2MTU_loopbk;
	/* Transmit frames with a length greater than MTU octets and a good
	   CRC */
	__be64 TGIANT_prio_0;
	__be64 TGIANT_prio_1;
	__be64 TGIANT_prio_2;
	__be64 TGIANT_prio_3;
	__be64 TGIANT_prio_4;
	__be64 TGIANT_prio_5;
	__be64 TGIANT_prio_6;
	__be64 TGIANT_prio_7;
	__be64 TGIANT_novlan;
	__be64 TGIANT_loopbk;
	/* Transmit broadcast frames with a good CRC */
	__be64 TBCAST_prio_0;
	__be64 TBCAST_prio_1;
	__be64 TBCAST_prio_2;
	__be64 TBCAST_prio_3;
	__be64 TBCAST_prio_4;
	__be64 TBCAST_prio_5;
	__be64 TBCAST_prio_6;
	__be64 TBCAST_prio_7;
	__be64 TBCAST_novlan;
	__be64 TBCAST_loopbk;
	/* Transmit multicast frames with a good CRC */
	__be64 TMCAST_prio_0;
	__be64 TMCAST_prio_1;
	__be64 TMCAST_prio_2;
	__be64 TMCAST_prio_3;
	__be64 TMCAST_prio_4;
	__be64 TMCAST_prio_5;
	__be64 TMCAST_prio_6;
	__be64 TMCAST_prio_7;
	__be64 TMCAST_novlan;
	__be64 TMCAST_loopbk;
	/* Transmit good frames that are neither broadcast nor multicast */
	__be64 TTOTG_prio_0;
	__be64 TTOTG_prio_1;
	__be64 TTOTG_prio_2;
	__be64 TTOTG_prio_3;
	__be64 TTOTG_prio_4;
	__be64 TTOTG_prio_5;
	__be64 TTOTG_prio_6;
	__be64 TTOTG_prio_7;
	__be64 TTOTG_novlan;
	__be64 TTOTG_loopbk;

	/* total octets of transmitted frames, including framing characters */
	__be64 TTTLOCT_prio_0;
	/* total octets of transmitted frames, not including framing
	   characters */
	__be64 TTTLOCT_NOFRM_prio_0;
	/* ifOutOctets */
	__be64 TOCT_prio_0;

	__be64 TTTLOCT_prio_1;
	__be64 TTTLOCT_NOFRM_prio_1;
	__be64 TOCT_prio_1;

	__be64 TTTLOCT_prio_2;
	__be64 TTTLOCT_NOFRM_prio_2;
	__be64 TOCT_prio_2;

	__be64 TTTLOCT_prio_3;
	__be64 TTTLOCT_NOFRM_prio_3;
	__be64 TOCT_prio_3;

	__be64 TTTLOCT_prio_4;
	__be64 TTTLOCT_NOFRM_prio_4;
	__be64 TOCT_prio_4;

	__be64 TTTLOCT_prio_5;
	__be64 TTTLOCT_NOFRM_prio_5;
	__be64 TOCT_prio_5;

	__be64 TTTLOCT_prio_6;
	__be64 TTTLOCT_NOFRM_prio_6;
	__be64 TOCT_prio_6;

	__be64 TTTLOCT_prio_7;
	__be64 TTTLOCT_NOFRM_prio_7;
	__be64 TOCT_prio_7;

	__be64 TTTLOCT_novlan;
	__be64 TTTLOCT_NOFRM_novlan;
	__be64 TOCT_novlan;

	__be64 TTTLOCT_loopbk;
	__be64 TTTLOCT_NOFRM_loopbk;
	__be64 TOCT_loopbk;

	/* Total frames transmitted with a good CRC that are not aborted */
	__be64 TTOT_prio_0;
	/* Total number of frames transmitted with 802.1Q encapsulation */
	__be64 T1Q_prio_0;
	__be64 reserved13;

	__be64 TTOT_prio_1;
	__be64 T1Q_prio_1;
	__be64 reserved14;

	__be64 TTOT_prio_2;
	__be64 T1Q_prio_2;
	__be64 reserved15;

	__be64 TTOT_prio_3;
	__be64 T1Q_prio_3;
	__be64 reserved16;

	__be64 TTOT_prio_4;
	__be64 T1Q_prio_4;
	__be64 reserved17;

	__be64 TTOT_prio_5;
	__be64 T1Q_prio_5;
	__be64 reserved18;

	__be64 TTOT_prio_6;
	__be64 T1Q_prio_6;
	__be64 reserved19;

	__be64 TTOT_prio_7;
	__be64 T1Q_prio_7;
	__be64 reserved20;

	__be64 TTOT_novlan;
	__be64 T1Q_novlan;
	__be64 reserved21;

	__be64 TTOT_loopbk;
	__be64 T1Q_loopbk;
	__be64 reserved22;

	/* Received frames with a length greater than MTU octets and a bad
	   CRC */
	__be32 RJBBR;
	/* Received frames with a bad CRC that are not runts, jabbers,
	   or alignment errors */
	__be32 RCRC;
	/* Received frames with SFD with a length of less than 64 octets and a
	   bad CRC */
	__be32 RRUNT;
	/* Received frames with a length less than 64 octets and a good CRC */
	__be32 RSHORT;
	/* Total Number of Received Packets Dropped */
	__be32 RDROP;
	/* Drop due to overflow  */
	__be32 RdropOvflw;
	/* Drop due to frame-length error (reported via rx_length_errors) */
	__be32 RdropLength;
	/* Total of good frames. Does not include frames received with
	   frame-too-long, FCS, or length errors */
	__be32 RTOTFRMS;
	/* Total dropped Xmited packets */
	__be32 TDROP;
};
568
569
570#endif
diff --git a/drivers/net/mlx4/en_resources.c b/drivers/net/mlx4/en_resources.c
new file mode 100644
index 000000000000..a0545209e507
--- /dev/null
+++ b/drivers/net/mlx4/en_resources.c
@@ -0,0 +1,96 @@
1/*
2 * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 *
32 */
33
34#include <linux/vmalloc.h>
35#include <linux/mlx4/qp.h>
36
37#include "mlx4_en.h"
38
/*
 * Fill a QP context for an ethernet TX or RX queue.  The exact bit
 * packing below matches the hardware QP context format.
 *
 * @size/@stride: ring size (entries) and WQE stride in bytes; both are
 *                assumed to be powers of two (ilog2 is applied) --
 *                NOTE(review): confirm callers guarantee this.
 * @is_tx: build a send-queue context, otherwise a receive-queue one
 * @rss:   non-zero when the QP belongs to an RSS set (then no SRQ is
 *         attached)
 * @qpn/@cqn/@srqn: QP, CQ and SRQ numbers to wire up
 * @context: output; fully overwritten
 */
void mlx4_en_fill_qp_context(struct mlx4_en_priv *priv, int size, int stride,
			     int is_tx, int rss, int qpn, int cqn, int srqn,
			     struct mlx4_qp_context *context)
{
	struct mlx4_en_dev *mdev = priv->mdev;

	memset(context, 0, sizeof *context);
	context->flags = cpu_to_be32(7 << 16 | rss << 13);
	context->pd = cpu_to_be32(mdev->priv_pdn);
	context->mtu_msgmax = 0xff;
	context->rq_size_stride = 0;
	if (is_tx)
		/* log2 ring size in bits 7:3, (log2 stride - 4) in bits 2:0 */
		context->sq_size_stride = ilog2(size) << 3 | (ilog2(stride) - 4);
	else
		context->sq_size_stride = 1;
	context->usr_page = cpu_to_be32(mdev->priv_uar.index);
	context->local_qpn = cpu_to_be32(qpn);
	context->pri_path.ackto = 1 & 0x07;
	/* scheduling queue encodes the port number in bits 7:6 */
	context->pri_path.sched_queue = 0x83 | (priv->port - 1) << 6;
	context->pri_path.counter_index = 0xff;
	/* TX and RX completions go to the same CQ */
	context->cqn_send = cpu_to_be32(cqn);
	context->cqn_recv = cpu_to_be32(cqn);
	context->db_rec_addr = cpu_to_be64(priv->res.db.dma << 2);
	if (!rss)
		context->srqn = cpu_to_be32(MLX4_EN_USE_SRQ | srqn);
}
65
66
67int mlx4_en_map_buffer(struct mlx4_buf *buf)
68{
69 struct page **pages;
70 int i;
71
72 if (BITS_PER_LONG == 64 || buf->nbufs == 1)
73 return 0;
74
75 pages = kmalloc(sizeof *pages * buf->nbufs, GFP_KERNEL);
76 if (!pages)
77 return -ENOMEM;
78
79 for (i = 0; i < buf->nbufs; ++i)
80 pages[i] = virt_to_page(buf->page_list[i].buf);
81
82 buf->direct.buf = vmap(pages, buf->nbufs, VM_MAP, PAGE_KERNEL);
83 kfree(pages);
84 if (!buf->direct.buf)
85 return -ENOMEM;
86
87 return 0;
88}
89
90void mlx4_en_unmap_buffer(struct mlx4_buf *buf)
91{
92 if (BITS_PER_LONG == 64 || buf->nbufs == 1)
93 return;
94
95 vunmap(buf->direct.buf);
96}
diff --git a/drivers/net/mlx4/en_rx.c b/drivers/net/mlx4/en_rx.c
new file mode 100644
index 000000000000..6232227f56c3
--- /dev/null
+++ b/drivers/net/mlx4/en_rx.c
@@ -0,0 +1,1080 @@
1/*
2 * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 *
32 */
33
34#include <linux/mlx4/cq.h>
35#include <linux/mlx4/qp.h>
36#include <linux/skbuff.h>
37#include <linux/if_ether.h>
38#include <linux/if_vlan.h>
39#include <linux/vmalloc.h>
40
41#include "mlx4_en.h"
42
43static void *get_wqe(struct mlx4_en_rx_ring *ring, int n)
44{
45 int offset = n << ring->srq.wqe_shift;
46 return ring->buf + offset;
47}
48
49static void mlx4_en_srq_event(struct mlx4_srq *srq, enum mlx4_event type)
50{
51 return;
52}
53
54static int mlx4_en_get_frag_header(struct skb_frag_struct *frags, void **mac_hdr,
55 void **ip_hdr, void **tcpudp_hdr,
56 u64 *hdr_flags, void *priv)
57{
58 *mac_hdr = page_address(frags->page) + frags->page_offset;
59 *ip_hdr = *mac_hdr + ETH_HLEN;
60 *tcpudp_hdr = (struct tcphdr *)(*ip_hdr + sizeof(struct iphdr));
61 *hdr_flags = LRO_IPV4 | LRO_TCP;
62
63 return 0;
64}
65
/*
 * Allocate one receive fragment from the ring's per-fragment page
 * allocator and DMA-map it.
 *
 * The allocator hands out frag_stride-sized slices of a high-order page.
 * When the current page's last usable slice is reached, a fresh page is
 * allocated for future slices and the old page (with its remaining
 * reference) is handed to the skb fragment; otherwise the current page's
 * refcount is bumped and the next slice is carved out.
 *
 * On success the mapped bus address is written into rx_desc->data[i].addr.
 * Returns 0 on success, -ENOMEM when a replacement page cannot be allocated.
 */
static int mlx4_en_alloc_frag(struct mlx4_en_priv *priv,
			      struct mlx4_en_rx_desc *rx_desc,
			      struct skb_frag_struct *skb_frags,
			      struct mlx4_en_rx_alloc *ring_alloc,
			      int i)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_en_frag_info *frag_info = &priv->frag_info[i];
	struct mlx4_en_rx_alloc *page_alloc = &ring_alloc[i];
	struct page *page;
	dma_addr_t dma;

	if (page_alloc->offset == frag_info->last_offset) {
		/* Allocate new page */
		page = alloc_pages(GFP_ATOMIC | __GFP_COMP, MLX4_EN_ALLOC_ORDER);
		if (!page)
			return -ENOMEM;

		/* Give the current page (last slice) to the fragment and
		 * start the allocator over on the fresh page. */
		skb_frags[i].page = page_alloc->page;
		skb_frags[i].page_offset = page_alloc->offset;
		page_alloc->page = page;
		page_alloc->offset = frag_info->frag_align;
	} else {
		page = page_alloc->page;
		/* Extra reference is owned by the skb fragment */
		get_page(page);

		skb_frags[i].page = page;
		skb_frags[i].page_offset = page_alloc->offset;
		page_alloc->offset += frag_info->frag_stride;
	}
	dma = pci_map_single(mdev->pdev, page_address(skb_frags[i].page) +
			     skb_frags[i].page_offset, frag_info->frag_size,
			     PCI_DMA_FROMDEVICE);
	rx_desc->data[i].addr = cpu_to_be64(dma);
	return 0;
}
102
103static int mlx4_en_init_allocator(struct mlx4_en_priv *priv,
104 struct mlx4_en_rx_ring *ring)
105{
106 struct mlx4_en_rx_alloc *page_alloc;
107 int i;
108
109 for (i = 0; i < priv->num_frags; i++) {
110 page_alloc = &ring->page_alloc[i];
111 page_alloc->page = alloc_pages(GFP_ATOMIC | __GFP_COMP,
112 MLX4_EN_ALLOC_ORDER);
113 if (!page_alloc->page)
114 goto out;
115
116 page_alloc->offset = priv->frag_info[i].frag_align;
117 mlx4_dbg(DRV, priv, "Initialized allocator:%d with page:%p\n",
118 i, page_alloc->page);
119 }
120 return 0;
121
122out:
123 while (i--) {
124 page_alloc = &ring->page_alloc[i];
125 put_page(page_alloc->page);
126 page_alloc->page = NULL;
127 }
128 return -ENOMEM;
129}
130
131static void mlx4_en_destroy_allocator(struct mlx4_en_priv *priv,
132 struct mlx4_en_rx_ring *ring)
133{
134 struct mlx4_en_rx_alloc *page_alloc;
135 int i;
136
137 for (i = 0; i < priv->num_frags; i++) {
138 page_alloc = &ring->page_alloc[i];
139 mlx4_dbg(DRV, priv, "Freeing allocator:%d count:%d\n",
140 i, page_count(page_alloc->page));
141
142 put_page(page_alloc->page);
143 page_alloc->page = NULL;
144 }
145}
146
147
/*
 * Initialize the static parts of one Rx descriptor: the next-WQE link,
 * the per-fragment byte counts and lkeys, and padding for the descriptor
 * slots that exceed the configured fragment count.  Buffer addresses are
 * filled in later by mlx4_en_prepare_rx_desc().
 */
static void mlx4_en_init_rx_desc(struct mlx4_en_priv *priv,
				 struct mlx4_en_rx_ring *ring, int index)
{
	struct mlx4_en_rx_desc *rx_desc = ring->buf + ring->stride * index;
	struct skb_frag_struct *skb_frags = ring->rx_info +
					    (index << priv->log_rx_info);
	int possible_frags;
	int i;

	/* Pre-link descriptor */
	rx_desc->next.next_wqe_index = cpu_to_be16((index + 1) & ring->size_mask);

	/* Set size and memtype fields */
	for (i = 0; i < priv->num_frags; i++) {
		skb_frags[i].size = priv->frag_info[i].frag_size;
		rx_desc->data[i].byte_count =
			cpu_to_be32(priv->frag_info[i].frag_size);
		rx_desc->data[i].lkey = cpu_to_be32(priv->mdev->mr.key);
	}

	/* If the number of used fragments does not fill up the ring stride,
	 * remaining (unused) fragments must be padded with null address/size
	 * and a special memory key */
	possible_frags = (ring->stride - sizeof(struct mlx4_en_rx_desc)) / DS_SIZE;
	for (i = priv->num_frags; i < possible_frags; i++) {
		rx_desc->data[i].byte_count = 0;
		rx_desc->data[i].lkey = cpu_to_be32(MLX4_EN_MEMTYPE_PAD);
		rx_desc->data[i].addr = 0;
	}
}
178
179
180static int mlx4_en_prepare_rx_desc(struct mlx4_en_priv *priv,
181 struct mlx4_en_rx_ring *ring, int index)
182{
183 struct mlx4_en_rx_desc *rx_desc = ring->buf + (index * ring->stride);
184 struct skb_frag_struct *skb_frags = ring->rx_info +
185 (index << priv->log_rx_info);
186 int i;
187
188 for (i = 0; i < priv->num_frags; i++)
189 if (mlx4_en_alloc_frag(priv, rx_desc, skb_frags, ring->page_alloc, i))
190 goto err;
191
192 return 0;
193
194err:
195 while (i--)
196 put_page(skb_frags[i].page);
197 return -ENOMEM;
198}
199
/* Publish the ring's producer index to the doorbell record so the HW
 * knows how many receive buffers have been posted. */
static inline void mlx4_en_update_rx_prod_db(struct mlx4_en_rx_ring *ring)
{
	*ring->wqres.db.db = cpu_to_be32(ring->prod & 0xffff);
}
204
/*
 * Initial fill of all Rx rings.  Buffers are posted one per ring per pass
 * so that, under memory pressure, all rings degrade evenly rather than the
 * first rings getting everything.  A partial fill is tolerated (each ring
 * then operates with actual_size entries), but a ring left below
 * MLX4_EN_MIN_RX_SIZE is a hard failure.
 * Returns 0 on (possibly partial) success, -ENOMEM otherwise.
 */
static int mlx4_en_fill_rx_buffers(struct mlx4_en_priv *priv)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_en_rx_ring *ring;
	int ring_ind;
	int buf_ind;

	for (buf_ind = 0; buf_ind < priv->prof->rx_ring_size; buf_ind++) {
		for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) {
			ring = &priv->rx_ring[ring_ind];

			if (mlx4_en_prepare_rx_desc(priv, ring,
						    ring->actual_size)) {
				if (ring->actual_size < MLX4_EN_MIN_RX_SIZE) {
					mlx4_err(mdev, "Failed to allocate "
						       "enough rx buffers\n");
					return -ENOMEM;
				} else {
					/* Accept the smaller ring size */
					if (netif_msg_rx_err(priv))
						mlx4_warn(mdev,
							  "Only %d buffers allocated\n",
							  ring->actual_size);
					goto out;
				}
			}
			ring->actual_size++;
			ring->prod++;
		}
	}
out:
	return 0;
}
237
238static int mlx4_en_fill_rx_buf(struct net_device *dev,
239 struct mlx4_en_rx_ring *ring)
240{
241 struct mlx4_en_priv *priv = netdev_priv(dev);
242 int num = 0;
243 int err;
244
245 while ((u32) (ring->prod - ring->cons) < ring->actual_size) {
246 err = mlx4_en_prepare_rx_desc(priv, ring, ring->prod &
247 ring->size_mask);
248 if (err) {
249 if (netif_msg_rx_err(priv))
250 mlx4_warn(priv->mdev,
251 "Failed preparing rx descriptor\n");
252 priv->port_stats.rx_alloc_failed++;
253 break;
254 }
255 ++num;
256 ++ring->prod;
257 }
258 if ((u32) (ring->prod - ring->cons) == ring->size)
259 ring->full = 1;
260
261 return num;
262}
263
/*
 * Release every outstanding Rx buffer between cons and prod: unmap each
 * fragment's DMA mapping (the bus address is read back from the HW
 * descriptor) and drop the page reference.  Advances cons up to prod.
 */
static void mlx4_en_free_rx_buf(struct mlx4_en_priv *priv,
				struct mlx4_en_rx_ring *ring)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct skb_frag_struct *skb_frags;
	struct mlx4_en_rx_desc *rx_desc;
	dma_addr_t dma;
	int index;
	int nr;

	mlx4_dbg(DRV, priv, "Freeing Rx buf - cons:%d prod:%d\n",
			ring->cons, ring->prod);

	/* Unmap and free Rx buffers */
	BUG_ON((u32) (ring->prod - ring->cons) > ring->size);
	while (ring->cons != ring->prod) {
		index = ring->cons & ring->size_mask;
		rx_desc = ring->buf + (index << ring->log_stride);
		skb_frags = ring->rx_info + (index << priv->log_rx_info);
		mlx4_dbg(DRV, priv, "Processing descriptor:%d\n", index);

		for (nr = 0; nr < priv->num_frags; nr++) {
			mlx4_dbg(DRV, priv, "Freeing fragment:%d\n", nr);
			dma = be64_to_cpu(rx_desc->data[nr].addr);

			mlx4_dbg(DRV, priv, "Unmaping buffer at dma:0x%llx\n", (u64) dma);
			pci_unmap_single(mdev->pdev, dma, skb_frags[nr].size,
					 PCI_DMA_FROMDEVICE);
			put_page(skb_frags[nr].page);
		}
		++ring->cons;
	}
}
297
298
/*
 * Delayed-work handler that retries posting Rx buffers after an earlier
 * allocation failure.  Holds mdev->state_lock to synchronize against
 * device/port teardown, and reschedules itself every HZ while any ring
 * still needs refilling.
 */
void mlx4_en_rx_refill(struct work_struct *work)
{
	struct delayed_work *delay = container_of(work, struct delayed_work, work);
	struct mlx4_en_priv *priv = container_of(delay, struct mlx4_en_priv,
						 refill_task);
	struct mlx4_en_dev *mdev = priv->mdev;
	struct net_device *dev = priv->dev;
	struct mlx4_en_rx_ring *ring;
	int need_refill = 0;
	int i;

	mutex_lock(&mdev->state_lock);
	if (!mdev->device_up || !priv->port_up)
		goto out;

	/* We only get here if there are no receive buffers, so we can't race
	 * with Rx interrupts while filling buffers */
	for (i = 0; i < priv->rx_ring_num; i++) {
		ring = &priv->rx_ring[i];
		if (ring->need_refill) {
			if (mlx4_en_fill_rx_buf(dev, ring)) {
				/* At least one buffer posted; HW can make
				 * progress again */
				ring->need_refill = 0;
				mlx4_en_update_rx_prod_db(ring);
			} else
				need_refill = 1;
		}
	}
	if (need_refill)
		queue_delayed_work(mdev->workqueue, &priv->refill_task, HZ);

out:
	mutex_unlock(&mdev->state_lock);
}
332
333
334int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
335 struct mlx4_en_rx_ring *ring, u32 size, u16 stride)
336{
337 struct mlx4_en_dev *mdev = priv->mdev;
338 int err;
339 int tmp;
340
341 /* Sanity check SRQ size before proceeding */
342 if (size >= mdev->dev->caps.max_srq_wqes)
343 return -EINVAL;
344
345 ring->prod = 0;
346 ring->cons = 0;
347 ring->size = size;
348 ring->size_mask = size - 1;
349 ring->stride = stride;
350 ring->log_stride = ffs(ring->stride) - 1;
351 ring->buf_size = ring->size * ring->stride;
352
353 tmp = size * roundup_pow_of_two(MLX4_EN_MAX_RX_FRAGS *
354 sizeof(struct skb_frag_struct));
355 ring->rx_info = vmalloc(tmp);
356 if (!ring->rx_info) {
357 mlx4_err(mdev, "Failed allocating rx_info ring\n");
358 return -ENOMEM;
359 }
360 mlx4_dbg(DRV, priv, "Allocated rx_info ring at addr:%p size:%d\n",
361 ring->rx_info, tmp);
362
363 err = mlx4_alloc_hwq_res(mdev->dev, &ring->wqres,
364 ring->buf_size, 2 * PAGE_SIZE);
365 if (err)
366 goto err_ring;
367
368 err = mlx4_en_map_buffer(&ring->wqres.buf);
369 if (err) {
370 mlx4_err(mdev, "Failed to map RX buffer\n");
371 goto err_hwq;
372 }
373 ring->buf = ring->wqres.buf.direct.buf;
374
375 /* Configure lro mngr */
376 memset(&ring->lro, 0, sizeof(struct net_lro_mgr));
377 ring->lro.dev = priv->dev;
378 ring->lro.features = LRO_F_NAPI;
379 ring->lro.frag_align_pad = NET_IP_ALIGN;
380 ring->lro.ip_summed = CHECKSUM_UNNECESSARY;
381 ring->lro.ip_summed_aggr = CHECKSUM_UNNECESSARY;
382 ring->lro.max_desc = mdev->profile.num_lro;
383 ring->lro.max_aggr = MAX_SKB_FRAGS;
384 ring->lro.lro_arr = kzalloc(mdev->profile.num_lro *
385 sizeof(struct net_lro_desc),
386 GFP_KERNEL);
387 if (!ring->lro.lro_arr) {
388 mlx4_err(mdev, "Failed to allocate lro array\n");
389 goto err_map;
390 }
391 ring->lro.get_frag_header = mlx4_en_get_frag_header;
392
393 return 0;
394
395err_map:
396 mlx4_en_unmap_buffer(&ring->wqres.buf);
397err_hwq:
398 mlx4_free_hwq_res(mdev->dev, &ring->wqres, ring->buf_size);
399err_ring:
400 vfree(ring->rx_info);
401 ring->rx_info = NULL;
402 return err;
403}
404
405int mlx4_en_activate_rx_rings(struct mlx4_en_priv *priv)
406{
407 struct mlx4_en_dev *mdev = priv->mdev;
408 struct mlx4_wqe_srq_next_seg *next;
409 struct mlx4_en_rx_ring *ring;
410 int i;
411 int ring_ind;
412 int err;
413 int stride = roundup_pow_of_two(sizeof(struct mlx4_en_rx_desc) +
414 DS_SIZE * priv->num_frags);
415 int max_gs = (stride - sizeof(struct mlx4_wqe_srq_next_seg)) / DS_SIZE;
416
417 for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) {
418 ring = &priv->rx_ring[ring_ind];
419
420 ring->prod = 0;
421 ring->cons = 0;
422 ring->actual_size = 0;
423 ring->cqn = priv->rx_cq[ring_ind].mcq.cqn;
424
425 ring->stride = stride;
426 ring->log_stride = ffs(ring->stride) - 1;
427 ring->buf_size = ring->size * ring->stride;
428
429 memset(ring->buf, 0, ring->buf_size);
430 mlx4_en_update_rx_prod_db(ring);
431
432 /* Initailize all descriptors */
433 for (i = 0; i < ring->size; i++)
434 mlx4_en_init_rx_desc(priv, ring, i);
435
436 /* Initialize page allocators */
437 err = mlx4_en_init_allocator(priv, ring);
438 if (err) {
439 mlx4_err(mdev, "Failed initializing ring allocator\n");
440 goto err_allocator;
441 }
442
443 /* Fill Rx buffers */
444 ring->full = 0;
445 }
446 if (mlx4_en_fill_rx_buffers(priv))
447 goto err_buffers;
448
449 for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) {
450 ring = &priv->rx_ring[ring_ind];
451
452 mlx4_en_update_rx_prod_db(ring);
453
454 /* Configure SRQ representing the ring */
455 ring->srq.max = ring->size;
456 ring->srq.max_gs = max_gs;
457 ring->srq.wqe_shift = ilog2(ring->stride);
458
459 for (i = 0; i < ring->srq.max; ++i) {
460 next = get_wqe(ring, i);
461 next->next_wqe_index =
462 cpu_to_be16((i + 1) & (ring->srq.max - 1));
463 }
464
465 err = mlx4_srq_alloc(mdev->dev, mdev->priv_pdn, &ring->wqres.mtt,
466 ring->wqres.db.dma, &ring->srq);
467 if (err){
468 mlx4_err(mdev, "Failed to allocate srq\n");
469 goto err_srq;
470 }
471 ring->srq.event = mlx4_en_srq_event;
472 }
473
474 return 0;
475
476err_srq:
477 while (ring_ind >= 0) {
478 ring = &priv->rx_ring[ring_ind];
479 mlx4_srq_free(mdev->dev, &ring->srq);
480 ring_ind--;
481 }
482
483err_buffers:
484 for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++)
485 mlx4_en_free_rx_buf(priv, &priv->rx_ring[ring_ind]);
486
487 ring_ind = priv->rx_ring_num - 1;
488err_allocator:
489 while (ring_ind >= 0) {
490 mlx4_en_destroy_allocator(priv, &priv->rx_ring[ring_ind]);
491 ring_ind--;
492 }
493 return err;
494}
495
496void mlx4_en_destroy_rx_ring(struct mlx4_en_priv *priv,
497 struct mlx4_en_rx_ring *ring)
498{
499 struct mlx4_en_dev *mdev = priv->mdev;
500
501 kfree(ring->lro.lro_arr);
502 mlx4_en_unmap_buffer(&ring->wqres.buf);
503 mlx4_free_hwq_res(mdev->dev, &ring->wqres, ring->buf_size);
504 vfree(ring->rx_info);
505 ring->rx_info = NULL;
506}
507
508void mlx4_en_deactivate_rx_ring(struct mlx4_en_priv *priv,
509 struct mlx4_en_rx_ring *ring)
510{
511 struct mlx4_en_dev *mdev = priv->mdev;
512
513 mlx4_srq_free(mdev->dev, &ring->srq);
514 mlx4_en_free_rx_buf(priv, ring);
515 mlx4_en_destroy_allocator(priv, ring);
516}
517
518
/*
 * Unmap a completed descriptor and free unused pages.
 *
 * The used fragments (those covering 'length' bytes) are handed over to
 * skb_frags_rx, and each is immediately replaced in the HW descriptor by
 * a freshly allocated fragment so the slot can be reposted.
 *
 * Returns the number of fragments the packet consumed, or 0 if a
 * replacement allocation failed, in which case the accumulated fragments
 * are released and the packet must be dropped (the descriptor itself
 * remains valid with the already-swapped-in fragments).
 */
static int mlx4_en_complete_rx_desc(struct mlx4_en_priv *priv,
				    struct mlx4_en_rx_desc *rx_desc,
				    struct skb_frag_struct *skb_frags,
				    struct skb_frag_struct *skb_frags_rx,
				    struct mlx4_en_rx_alloc *page_alloc,
				    int length)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_en_frag_info *frag_info;
	int nr;
	dma_addr_t dma;

	/* Collect used fragments while replacing them in the HW descirptors */
	for (nr = 0; nr < priv->num_frags; nr++) {
		frag_info = &priv->frag_info[nr];
		if (length <= frag_info->frag_prefix_size)
			break;

		/* Save page reference in skb */
		skb_frags_rx[nr].page = skb_frags[nr].page;
		skb_frags_rx[nr].size = skb_frags[nr].size;
		skb_frags_rx[nr].page_offset = skb_frags[nr].page_offset;
		/* Read the bus address before alloc_frag overwrites it */
		dma = be64_to_cpu(rx_desc->data[nr].addr);

		/* Allocate a replacement page */
		if (mlx4_en_alloc_frag(priv, rx_desc, skb_frags, page_alloc, nr))
			goto fail;

		/* Unmap buffer */
		pci_unmap_single(mdev->pdev, dma, skb_frags[nr].size,
				 PCI_DMA_FROMDEVICE);
	}
	/* Adjust size of last fragment to match actual length */
	skb_frags_rx[nr - 1].size = length -
		priv->frag_info[nr - 1].frag_prefix_size;
	return nr;

fail:
	/* Drop all accumulated fragments (which have already been replaced in
	 * the descriptor) of this packet; remaining fragments are reused... */
	while (nr > 0) {
		nr--;
		put_page(skb_frags_rx[nr].page);
	}
	return 0;
}
566
567
/*
 * Build an skb for a completed Rx descriptor.
 *
 * Packets up to SMALL_PACKET_SIZE are copied entirely into the skb's
 * linear area (the descriptor's fragments stay posted).  Larger packets
 * get HEADER_COPY_SIZE bytes copied into the linear area and the rest
 * attached as page fragments taken over from the descriptor via
 * mlx4_en_complete_rx_desc().
 *
 * Returns the skb, or NULL on allocation failure (caller drops the packet).
 */
static struct sk_buff *mlx4_en_rx_skb(struct mlx4_en_priv *priv,
				      struct mlx4_en_rx_desc *rx_desc,
				      struct skb_frag_struct *skb_frags,
				      struct mlx4_en_rx_alloc *page_alloc,
				      unsigned int length)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct sk_buff *skb;
	void *va;
	int used_frags;
	dma_addr_t dma;

	skb = dev_alloc_skb(SMALL_PACKET_SIZE + NET_IP_ALIGN);
	if (!skb) {
		mlx4_dbg(RX_ERR, priv, "Failed allocating skb\n");
		return NULL;
	}
	skb->dev = priv->dev;
	skb_reserve(skb, NET_IP_ALIGN);
	skb->len = length;
	skb->truesize = length + sizeof(struct sk_buff);

	/* Get pointer to first fragment so we could copy the headers into the
	 * (linear part of the) skb */
	va = page_address(skb_frags[0].page) + skb_frags[0].page_offset;

	if (length <= SMALL_PACKET_SIZE) {
		/* We are copying all relevant data to the skb - temporarily
		 * synch buffers for the copy */
		dma = be64_to_cpu(rx_desc->data[0].addr);
		dma_sync_single_range_for_cpu(&mdev->pdev->dev, dma, 0,
					      length, DMA_FROM_DEVICE);
		skb_copy_to_linear_data(skb, va, length);
		dma_sync_single_range_for_device(&mdev->pdev->dev, dma, 0,
						 length, DMA_FROM_DEVICE);
		skb->tail += length;
	} else {

		/* Move relevant fragments to skb */
		used_frags = mlx4_en_complete_rx_desc(priv, rx_desc, skb_frags,
						      skb_shinfo(skb)->frags,
						      page_alloc, length);
		skb_shinfo(skb)->nr_frags = used_frags;

		/* Copy headers into the skb linear buffer */
		memcpy(skb->data, va, HEADER_COPY_SIZE);
		skb->tail += HEADER_COPY_SIZE;

		/* Skip headers in first fragment */
		skb_shinfo(skb)->frags[0].page_offset += HEADER_COPY_SIZE;

		/* Adjust size of first fragment */
		skb_shinfo(skb)->frags[0].size -= HEADER_COPY_SIZE;
		skb->data_len = length - HEADER_COPY_SIZE;
	}
	return skb;
}
625
626static void mlx4_en_copy_desc(struct mlx4_en_priv *priv,
627 struct mlx4_en_rx_ring *ring,
628 int from, int to, int num)
629{
630 struct skb_frag_struct *skb_frags_from;
631 struct skb_frag_struct *skb_frags_to;
632 struct mlx4_en_rx_desc *rx_desc_from;
633 struct mlx4_en_rx_desc *rx_desc_to;
634 int from_index, to_index;
635 int nr, i;
636
637 for (i = 0; i < num; i++) {
638 from_index = (from + i) & ring->size_mask;
639 to_index = (to + i) & ring->size_mask;
640 skb_frags_from = ring->rx_info + (from_index << priv->log_rx_info);
641 skb_frags_to = ring->rx_info + (to_index << priv->log_rx_info);
642 rx_desc_from = ring->buf + (from_index << ring->log_stride);
643 rx_desc_to = ring->buf + (to_index << ring->log_stride);
644
645 for (nr = 0; nr < priv->num_frags; nr++) {
646 skb_frags_to[nr].page = skb_frags_from[nr].page;
647 skb_frags_to[nr].page_offset = skb_frags_from[nr].page_offset;
648 rx_desc_to->data[nr].addr = rx_desc_from->data[nr].addr;
649 }
650 }
651}
652
653
/*
 * Poll the Rx completion queue for up to 'budget' completions: validate
 * each CQE, feed eligible TCP/IPv4 packets to LRO, build skbs for the
 * rest and push them up the stack, then repost the consumed descriptors.
 * Returns the number of packets processed; callers compare the result
 * against 'budget' to decide whether more work is pending.
 */
int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int budget)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_cqe *cqe;
	struct mlx4_en_rx_ring *ring = &priv->rx_ring[cq->ring];
	struct skb_frag_struct *skb_frags;
	struct skb_frag_struct lro_frags[MLX4_EN_MAX_RX_FRAGS];
	struct mlx4_en_rx_desc *rx_desc;
	struct sk_buff *skb;
	int index;
	int nr;
	unsigned int length;
	int polled = 0;
	int ip_summed;

	if (!priv->port_up)
		return 0;

	/* We assume a 1:1 mapping between CQEs and Rx descriptors, so Rx
	 * descriptor offset can be deduced from the CQE index instead of
	 * reading 'cqe->index' */
	index = cq->mcq.cons_index & ring->size_mask;
	cqe = &cq->buf[index];

	/* Process all completed CQEs; the CQE ownership bit toggles each
	 * time the consumer index wraps, hence the XNOR test */
	while (XNOR(cqe->owner_sr_opcode & MLX4_CQE_OWNER_MASK,
		    cq->mcq.cons_index & cq->size)) {

		skb_frags = ring->rx_info + (index << priv->log_rx_info);
		rx_desc = ring->buf + (index << ring->log_stride);

		/*
		 * make sure we read the CQE after we read the ownership bit
		 */
		rmb();

		/* Drop packet on bad receive or bad checksum */
		if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) ==
						MLX4_CQE_OPCODE_ERROR)) {
			mlx4_err(mdev, "CQE completed in error - vendor "
				  "syndrom:%d syndrom:%d\n",
				  ((struct mlx4_err_cqe *) cqe)->vendor_err_syndrome,
				  ((struct mlx4_err_cqe *) cqe)->syndrome);
			goto next;
		}
		if (unlikely(cqe->badfcs_enc & MLX4_CQE_BAD_FCS)) {
			mlx4_dbg(RX_ERR, priv, "Accepted frame with bad FCS\n");
			goto next;
		}

		/*
		 * Packet is OK - process it.
		 */
		length = be32_to_cpu(cqe->byte_cnt);
		ring->bytes += length;
		ring->packets++;

		if (likely(priv->rx_csum)) {
			if ((cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPOK)) &&
			    (cqe->checksum == cpu_to_be16(0xffff))) {
				priv->port_stats.rx_chksum_good++;
				/* This packet is eligible for LRO if it is:
				 * - DIX Ethernet (type interpretation)
				 * - TCP/IP (v4)
				 * - without IP options
				 * - not an IP fragment */
				if (mlx4_en_can_lro(cqe->status) &&
				    dev->features & NETIF_F_LRO) {

					nr = mlx4_en_complete_rx_desc(
						priv, rx_desc,
						skb_frags, lro_frags,
						ring->page_alloc, length);
					if (!nr)
						goto next;

					if (priv->vlgrp && (cqe->vlan_my_qpn &
						cpu_to_be32(MLX4_CQE_VLAN_PRESENT_MASK))) {
						lro_vlan_hwaccel_receive_frags(
						       &ring->lro, lro_frags,
						       length, length,
						       priv->vlgrp,
						       be16_to_cpu(cqe->sl_vid),
						       NULL, 0);
					} else
						lro_receive_frags(&ring->lro,
								  lro_frags,
								  length,
								  length,
								  NULL, 0);

					goto next;
				}

				/* LRO not possible, complete processing here */
				ip_summed = CHECKSUM_UNNECESSARY;
				INC_PERF_COUNTER(priv->pstats.lro_misses);
			} else {
				ip_summed = CHECKSUM_NONE;
				priv->port_stats.rx_chksum_none++;
			}
		} else {
			ip_summed = CHECKSUM_NONE;
			priv->port_stats.rx_chksum_none++;
		}

		skb = mlx4_en_rx_skb(priv, rx_desc, skb_frags,
				     ring->page_alloc, length);
		if (!skb) {
			priv->stats.rx_dropped++;
			goto next;
		}

		skb->ip_summed = ip_summed;
		skb->protocol = eth_type_trans(skb, dev);

		/* Push it up the stack */
		if (priv->vlgrp && (be32_to_cpu(cqe->vlan_my_qpn) &
				    MLX4_CQE_VLAN_PRESENT_MASK)) {
			vlan_hwaccel_receive_skb(skb, priv->vlgrp,
						be16_to_cpu(cqe->sl_vid));
		} else
			netif_receive_skb(skb);

		dev->last_rx = jiffies;

next:
		++cq->mcq.cons_index;
		index = (cq->mcq.cons_index) & ring->size_mask;
		cqe = &cq->buf[index];
		if (++polled == budget) {
			/* We are here because we reached the NAPI budget -
			 * flush only pending LRO sessions */
			lro_flush_all(&ring->lro);
			goto out;
		}
	}

	/* If CQ is empty flush all LRO sessions unconditionally */
	lro_flush_all(&ring->lro);

out:
	AVG_PERF_COUNTER(priv->pstats.rx_coal_avg, polled);
	mlx4_cq_set_ci(&cq->mcq);
	wmb(); /* ensure HW sees CQ consumer before we post new buffers */
	ring->cons = cq->mcq.cons_index;
	ring->prod += polled; /* Polled descriptors were realocated in place */
	if (unlikely(!ring->full)) {
		/* Ring was running short on buffers: shift the replaced
		 * descriptors forward and try to top the ring up */
		mlx4_en_copy_desc(priv, ring, ring->cons - polled,
				  ring->prod - polled, polled);
		mlx4_en_fill_rx_buf(dev, ring);
	}
	mlx4_en_update_rx_prod_db(ring);
	return polled;
}
810
811
812void mlx4_en_rx_irq(struct mlx4_cq *mcq)
813{
814 struct mlx4_en_cq *cq = container_of(mcq, struct mlx4_en_cq, mcq);
815 struct mlx4_en_priv *priv = netdev_priv(cq->dev);
816
817 if (priv->port_up)
818 netif_rx_schedule(cq->dev, &cq->napi);
819 else
820 mlx4_en_arm_cq(priv, cq);
821}
822
823/* Rx CQ polling - called by NAPI */
824int mlx4_en_poll_rx_cq(struct napi_struct *napi, int budget)
825{
826 struct mlx4_en_cq *cq = container_of(napi, struct mlx4_en_cq, napi);
827 struct net_device *dev = cq->dev;
828 struct mlx4_en_priv *priv = netdev_priv(dev);
829 int done;
830
831 done = mlx4_en_process_rx_cq(dev, cq, budget);
832
833 /* If we used up all the quota - we're probably not done yet... */
834 if (done == budget)
835 INC_PERF_COUNTER(priv->pstats.napi_quota);
836 else {
837 /* Done for now */
838 netif_rx_complete(dev, napi);
839 mlx4_en_arm_cq(priv, cq);
840 }
841 return done;
842}
843
844
845/* Calculate the last offset position that accomodates a full fragment
846 * (assuming fagment size = stride-align) */
847static int mlx4_en_last_alloc_offset(struct mlx4_en_priv *priv, u16 stride, u16 align)
848{
849 u16 res = MLX4_EN_ALLOC_SIZE % stride;
850 u16 offset = MLX4_EN_ALLOC_SIZE - stride - res + align;
851
852 mlx4_dbg(DRV, priv, "Calculated last offset for stride:%d align:%d "
853 "res:%d offset:%d\n", stride, align, res, offset);
854 return offset;
855}
856
857
/* Candidate fragment sizes for the Rx scatter list;
 * mlx4_en_calc_rx_buf() consumes them in order until the effective
 * frame size is covered (at most MLX4_EN_MAX_RX_FRAGS entries). */
static int frag_sizes[] = {
	FRAG_SZ0,
	FRAG_SZ1,
	FRAG_SZ2,
	FRAG_SZ3
};
864
/*
 * Plan the Rx scatter list for the current MTU: split the effective frame
 * size (MTU plus Ethernet, VLAN and LLC/SNAP headers) across fragments
 * using the frag_sizes[] ladder, and precompute each fragment's prefix,
 * alignment, stride and allocator last_offset.  Only the first fragment
 * is IP-aligned, so the packet headers land on a NET_IP_ALIGN boundary.
 */
void mlx4_en_calc_rx_buf(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	int eff_mtu = dev->mtu + ETH_HLEN + VLAN_HLEN + ETH_LLC_SNAP_SIZE;
	int buf_size = 0;
	int i = 0;

	while (buf_size < eff_mtu) {
		/* Last fragment is trimmed to the remaining bytes */
		priv->frag_info[i].frag_size =
			(eff_mtu > buf_size + frag_sizes[i]) ?
				frag_sizes[i] : eff_mtu - buf_size;
		priv->frag_info[i].frag_prefix_size = buf_size;
		if (!i)	{
			priv->frag_info[i].frag_align = NET_IP_ALIGN;
			priv->frag_info[i].frag_stride =
				ALIGN(frag_sizes[i] + NET_IP_ALIGN, SMP_CACHE_BYTES);
		} else {
			priv->frag_info[i].frag_align = 0;
			priv->frag_info[i].frag_stride =
				ALIGN(frag_sizes[i], SMP_CACHE_BYTES);
		}
		priv->frag_info[i].last_offset = mlx4_en_last_alloc_offset(
						priv, priv->frag_info[i].frag_stride,
						priv->frag_info[i].frag_align);
		buf_size += priv->frag_info[i].frag_size;
		i++;
	}

	priv->num_frags = i;
	priv->rx_skb_size = eff_mtu;
	/* log2 of per-descriptor rx_info bytes, for shift-based addressing */
	priv->log_rx_info = ROUNDUP_LOG2(i * sizeof(struct skb_frag_struct));

	mlx4_dbg(DRV, priv, "Rx buffer scatter-list (effective-mtu:%d "
		  "num_frags:%d):\n", eff_mtu, priv->num_frags);
	for (i = 0; i < priv->num_frags; i++) {
		mlx4_dbg(DRV, priv, "  frag:%d - size:%d prefix:%d align:%d "
				"stride:%d last_offset:%d\n", i,
				priv->frag_info[i].frag_size,
				priv->frag_info[i].frag_prefix_size,
				priv->frag_info[i].frag_align,
				priv->frag_info[i].frag_stride,
				priv->frag_info[i].last_offset);
	}
}
909
910/* RSS related functions */
911
912/* Calculate rss size and map each entry in rss table to rx ring */
913void mlx4_en_set_default_rss_map(struct mlx4_en_priv *priv,
914 struct mlx4_en_rss_map *rss_map,
915 int num_entries, int num_rings)
916{
917 int i;
918
919 rss_map->size = roundup_pow_of_two(num_entries);
920 mlx4_dbg(DRV, priv, "Setting default RSS map of %d entires\n",
921 rss_map->size);
922
923 for (i = 0; i < rss_map->size; i++) {
924 rss_map->map[i] = i % num_rings;
925 mlx4_dbg(DRV, priv, "Entry %d ---> ring %d\n", i, rss_map->map[i]);
926 }
927}
928
929static void mlx4_en_sqp_event(struct mlx4_qp *qp, enum mlx4_event event)
930{
931 return;
932}
933
934
935static int mlx4_en_config_rss_qp(struct mlx4_en_priv *priv,
936 int qpn, int srqn, int cqn,
937 enum mlx4_qp_state *state,
938 struct mlx4_qp *qp)
939{
940 struct mlx4_en_dev *mdev = priv->mdev;
941 struct mlx4_qp_context *context;
942 int err = 0;
943
944 context = kmalloc(sizeof *context , GFP_KERNEL);
945 if (!context) {
946 mlx4_err(mdev, "Failed to allocate qp context\n");
947 return -ENOMEM;
948 }
949
950 err = mlx4_qp_alloc(mdev->dev, qpn, qp);
951 if (err) {
952 mlx4_err(mdev, "Failed to allocate qp #%d\n", qpn);
953 goto out;
954 return err;
955 }
956 qp->event = mlx4_en_sqp_event;
957
958 memset(context, 0, sizeof *context);
959 mlx4_en_fill_qp_context(priv, 0, 0, 0, 0, qpn, cqn, srqn, context);
960
961 err = mlx4_qp_to_ready(mdev->dev, &priv->res.mtt, context, qp, state);
962 if (err) {
963 mlx4_qp_remove(mdev->dev, qp);
964 mlx4_qp_free(mdev->dev, qp);
965 }
966out:
967 kfree(context);
968 return err;
969}
970
/*
 * Allocate rx qp's and configure them according to the rss map, then
 * create the RSS indirection QP that spreads incoming flows across them.
 * On any failure, every QP brought up so far is reset, removed and freed,
 * and the reserved QP number ranges are released.
 * Returns 0 on success or a negative errno.
 */
int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_en_rss_map *rss_map = &priv->rss_map;
	struct mlx4_qp_context context;
	struct mlx4_en_rss_context *rss_context;
	void *ptr;
	int rss_xor = mdev->profile.rss_xor;
	u8 rss_mask = mdev->profile.rss_mask;
	int i, srqn, qpn, cqn;
	int err = 0;
	int good_qps = 0;

	mlx4_dbg(DRV, priv, "Configuring rss steering for port %u\n", priv->port);
	err = mlx4_qp_reserve_range(mdev->dev, rss_map->size,
				    rss_map->size, &rss_map->base_qpn);
	if (err) {
		mlx4_err(mdev, "Failed reserving %d qps for port %u\n",
			 rss_map->size, priv->port);
		return err;
	}

	/* One QP per RSS table entry, wired to that entry's ring's CQ/SRQ */
	for (i = 0; i < rss_map->size; i++) {
		cqn = priv->rx_ring[rss_map->map[i]].cqn;
		srqn = priv->rx_ring[rss_map->map[i]].srq.srqn;
		qpn = rss_map->base_qpn + i;
		err = mlx4_en_config_rss_qp(priv, qpn, srqn, cqn,
					    &rss_map->state[i],
					    &rss_map->qps[i]);
		if (err)
			goto rss_err;

		++good_qps;
	}

	/* Configure RSS indirection qp */
	err = mlx4_qp_reserve_range(mdev->dev, 1, 1, &priv->base_qpn);
	if (err) {
		mlx4_err(mdev, "Failed to reserve range for RSS "
			       "indirection qp\n");
		goto rss_err;
	}
	err = mlx4_qp_alloc(mdev->dev, priv->base_qpn, &rss_map->indir_qp);
	if (err) {
		mlx4_err(mdev, "Failed to allocate RSS indirection QP\n");
		goto reserve_err;
	}
	rss_map->indir_qp.event = mlx4_en_sqp_event;
	mlx4_en_fill_qp_context(priv, 0, 0, 0, 1, priv->base_qpn,
				priv->rx_ring[0].cqn, 0, &context);

	/* NOTE(review): the RSS context lives at a fixed 0x3c offset inside
	 * the QP context - confirm against the mlx4 PRM */
	ptr = ((void *) &context) + 0x3c;
	rss_context = (struct mlx4_en_rss_context *) ptr;
	rss_context->base_qpn = cpu_to_be32(ilog2(rss_map->size) << 24 |
					    (rss_map->base_qpn));
	rss_context->default_qpn = cpu_to_be32(rss_map->base_qpn);
	rss_context->hash_fn = rss_xor & 0x3;
	rss_context->flags = rss_mask << 2;

	err = mlx4_qp_to_ready(mdev->dev, &priv->res.mtt, &context,
			       &rss_map->indir_qp, &rss_map->indir_state);
	if (err)
		goto indir_err;

	return 0;

indir_err:
	mlx4_qp_modify(mdev->dev, NULL, rss_map->indir_state,
		       MLX4_QP_STATE_RST, NULL, 0, 0, &rss_map->indir_qp);
	mlx4_qp_remove(mdev->dev, &rss_map->indir_qp);
	mlx4_qp_free(mdev->dev, &rss_map->indir_qp);
reserve_err:
	mlx4_qp_release_range(mdev->dev, priv->base_qpn, 1);
rss_err:
	/* Tear down only the QPs that reached the ready state */
	for (i = 0; i < good_qps; i++) {
		mlx4_qp_modify(mdev->dev, NULL, rss_map->state[i],
			       MLX4_QP_STATE_RST, NULL, 0, 0, &rss_map->qps[i]);
		mlx4_qp_remove(mdev->dev, &rss_map->qps[i]);
		mlx4_qp_free(mdev->dev, &rss_map->qps[i]);
	}
	mlx4_qp_release_range(mdev->dev, rss_map->base_qpn, rss_map->size);
	return err;
}
1055
/*
 * Tear down RSS steering: reset, remove and free the indirection QP and
 * each per-entry Rx QP, then release the reserved QP number ranges.
 * Exact inverse of mlx4_en_config_rss_steer().
 */
void mlx4_en_release_rss_steer(struct mlx4_en_priv *priv)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_en_rss_map *rss_map = &priv->rss_map;
	int i;

	/* Indirection QP goes first so no new flows are dispatched */
	mlx4_qp_modify(mdev->dev, NULL, rss_map->indir_state,
		       MLX4_QP_STATE_RST, NULL, 0, 0, &rss_map->indir_qp);
	mlx4_qp_remove(mdev->dev, &rss_map->indir_qp);
	mlx4_qp_free(mdev->dev, &rss_map->indir_qp);
	mlx4_qp_release_range(mdev->dev, priv->base_qpn, 1);

	for (i = 0; i < rss_map->size; i++) {
		mlx4_qp_modify(mdev->dev, NULL, rss_map->state[i],
			       MLX4_QP_STATE_RST, NULL, 0, 0, &rss_map->qps[i]);
		mlx4_qp_remove(mdev->dev, &rss_map->qps[i]);
		mlx4_qp_free(mdev->dev, &rss_map->qps[i]);
	}
	mlx4_qp_release_range(mdev->dev, rss_map->base_qpn, rss_map->size);
}
1076
1077
1078
1079
1080
diff --git a/drivers/net/mlx4/en_tx.c b/drivers/net/mlx4/en_tx.c
new file mode 100644
index 000000000000..8592f8fb8475
--- /dev/null
+++ b/drivers/net/mlx4/en_tx.c
@@ -0,0 +1,820 @@
1/*
2 * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 *
32 */
33
34#include <asm/page.h>
35#include <linux/mlx4/cq.h>
36#include <linux/mlx4/qp.h>
37#include <linux/skbuff.h>
38#include <linux/if_vlan.h>
39#include <linux/vmalloc.h>
40
41#include "mlx4_en.h"
42
43enum {
	/* Largest payload that fits inline in one WQE:
	 * 128 (TXBB) - 16 (ctrl) - 4 (inline seg) - 4 (spare). */
44	MAX_INLINE = 104, /* 128 - 16 - 4 - 4 */
45};
46
/* Packets up to this many bytes are copied inline into the descriptor
 * instead of being DMA-mapped; clamped to MAX_INLINE at ring creation. */
47static int inline_thold __read_mostly = MAX_INLINE;
48
49module_param_named(inline_thold, inline_thold, int, 0444);
/* Fix: "treshold" -> "threshold" in the user-visible parameter text. */
50MODULE_PARM_DESC(inline_thold, "threshold for using inline data");
51
/* Allocate all software and hardware resources for one TX ring:
 * per-descriptor tracking array, bounce buffer for descriptors that
 * wrap the ring end, the HW work queue, and a QP for the ring.
 * @size must be a power of two (size_mask relies on it).
 * Returns 0 on success or a negative errno; on failure everything
 * already acquired is released via the goto-unwind chain. */
52int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
53			   struct mlx4_en_tx_ring *ring, u32 size,
54			   u16 stride)
55{
56	struct mlx4_en_dev *mdev = priv->mdev;
57	int tmp;
58	int err;
59
60	ring->size = size;
61	ring->size_mask = size - 1;
62	ring->stride = stride;
63
	/* Clamp the module parameter once; values above MAX_INLINE
	 * would not fit in a descriptor. */
64	inline_thold = min(inline_thold, MAX_INLINE);
65
66	spin_lock_init(&ring->comp_lock);
67
68	tmp = size * sizeof(struct mlx4_en_tx_info);
69	ring->tx_info = vmalloc(tmp);
70	if (!ring->tx_info) {
71		mlx4_err(mdev, "Failed allocating tx_info ring\n");
72		return -ENOMEM;
73	}
74	mlx4_dbg(DRV, priv, "Allocated tx_info ring at addr:%p size:%d\n",
75		 ring->tx_info, tmp);
76
	/* Staging area for descriptors that would wrap past ring end. */
77	ring->bounce_buf = kmalloc(MAX_DESC_SIZE, GFP_KERNEL);
78	if (!ring->bounce_buf) {
79		mlx4_err(mdev, "Failed allocating bounce buffer\n");
80		err = -ENOMEM;
81		goto err_tx;
82	}
83	ring->buf_size = ALIGN(size * ring->stride, MLX4_EN_PAGE_SIZE);
84
85	err = mlx4_alloc_hwq_res(mdev->dev, &ring->wqres, ring->buf_size,
86				 2 * PAGE_SIZE);
87	if (err) {
88		mlx4_err(mdev, "Failed allocating hwq resources\n");
89		goto err_bounce;
90	}
91
92	err = mlx4_en_map_buffer(&ring->wqres.buf);
93	if (err) {
94		mlx4_err(mdev, "Failed to map TX buffer\n");
95		goto err_hwq_res;
96	}
97
98	ring->buf = ring->wqres.buf.direct.buf;
99
100	mlx4_dbg(DRV, priv, "Allocated TX ring (addr:%p) - buf:%p size:%d "
101		 "buf_size:%d dma:%llx\n", ring, ring->buf, ring->size,
102		 ring->buf_size, (unsigned long long) ring->wqres.buf.direct.map);
103
104	err = mlx4_qp_reserve_range(mdev->dev, 1, 1, &ring->qpn);
105	if (err) {
106		mlx4_err(mdev, "Failed reserving qp for tx ring.\n");
107		goto err_map;
108	}
109
110	err = mlx4_qp_alloc(mdev->dev, ring->qpn, &ring->qp);
111	if (err) {
112		mlx4_err(mdev, "Failed allocating qp %d\n", ring->qpn);
113		goto err_reserve;
114	}
115
116	return 0;
117
	/* Unwind in reverse order of acquisition. */
118err_reserve:
119	mlx4_qp_release_range(mdev->dev, ring->qpn, 1);
120err_map:
121	mlx4_en_unmap_buffer(&ring->wqres.buf);
122err_hwq_res:
123	mlx4_free_hwq_res(mdev->dev, &ring->wqres, ring->buf_size);
124err_bounce:
125	kfree(ring->bounce_buf);
126	ring->bounce_buf = NULL;
127err_tx:
128	vfree(ring->tx_info);
129	ring->tx_info = NULL;
130	return err;
131}
132
/* Release every resource acquired by mlx4_en_create_tx_ring, in the
 * reverse order of its error-unwind chain. */
133void mlx4_en_destroy_tx_ring(struct mlx4_en_priv *priv,
134			     struct mlx4_en_tx_ring *ring)
135{
136	struct mlx4_en_dev *mdev = priv->mdev;
137	mlx4_dbg(DRV, priv, "Destroying tx ring, qpn: %d\n", ring->qpn);
138
139	mlx4_qp_remove(mdev->dev, &ring->qp);
140	mlx4_qp_free(mdev->dev, &ring->qp);
141	mlx4_qp_release_range(mdev->dev, ring->qpn, 1);
142	mlx4_en_unmap_buffer(&ring->wqres.buf);
143	mlx4_free_hwq_res(mdev->dev, &ring->wqres, ring->buf_size);
144	kfree(ring->bounce_buf);
145	ring->bounce_buf = NULL;
146	vfree(ring->tx_info);
147	ring->tx_info = NULL;
148}
149
/* Bind the ring to completion queue @cq and SRQ number @srqn, reset
 * its software indices, and drive its QP from RESET to ready-to-send.
 * Returns the status of the final QP state transition. */
150int mlx4_en_activate_tx_ring(struct mlx4_en_priv *priv,
151			     struct mlx4_en_tx_ring *ring,
152			     int cq, int srqn)
153{
154	struct mlx4_en_dev *mdev = priv->mdev;
155	int err;
156
157	ring->cqn = cq;
158	ring->prod = 0;
	/* cons starts at -1 so the "skip last polled descriptor" step in
	 * the free/poll paths lands on index 0 (last_nr_txbb == 1). */
159	ring->cons = 0xffffffff;
160	ring->last_nr_txbb = 1;
161	ring->poll_cnt = 0;
162	ring->blocked = 0;
163	memset(ring->tx_info, 0, ring->size * sizeof(struct mlx4_en_tx_info));
164	memset(ring->buf, 0, ring->buf_size);
165
166	ring->qp_state = MLX4_QP_STATE_RST;
	/* Pre-byteswapped doorbell value: QP number in the upper bytes. */
167	ring->doorbell_qpn = swab32(ring->qp.qpn << 8);
168
169	mlx4_en_fill_qp_context(priv, ring->size, ring->stride, 1, 0, ring->qpn,
170				ring->cqn, srqn, &ring->context);
171
172	err = mlx4_qp_to_ready(mdev->dev, &ring->wqres.mtt, &ring->context,
173			       &ring->qp, &ring->qp_state);
174
175	return err;
176}
177
/* Quiesce the ring by moving its QP from its current state back to
 * RESET; the inverse of mlx4_en_activate_tx_ring. */
178void mlx4_en_deactivate_tx_ring(struct mlx4_en_priv *priv,
179				struct mlx4_en_tx_ring *ring)
180{
181	struct mlx4_en_dev *mdev = priv->mdev;
182
183	mlx4_qp_modify(mdev->dev, NULL, ring->qp_state,
184		       MLX4_QP_STATE_RST, NULL, 0, 0, &ring->qp);
185}
186
187
/* Complete one TX descriptor: unmap its DMA segments, free the skb,
 * and stamp each freed TXBB with the ownership pattern for the next
 * lap of the ring (@owner flips per lap). Handles descriptors that
 * wrap past the end of the ring buffer. Returns the number of TXBBs
 * the descriptor occupied so the caller can advance its index. */
188static u32 mlx4_en_free_tx_desc(struct mlx4_en_priv *priv,
189				struct mlx4_en_tx_ring *ring,
190				int index, u8 owner)
191{
192	struct mlx4_en_dev *mdev = priv->mdev;
193	struct mlx4_en_tx_info *tx_info = &ring->tx_info[index];
194	struct mlx4_en_tx_desc *tx_desc = ring->buf + index * TXBB_SIZE;
195	struct mlx4_wqe_data_seg *data = (void *) tx_desc + tx_info->data_offset;
196	struct sk_buff *skb = tx_info->skb;
197	struct skb_frag_struct *frag;
198	void *end = ring->buf + ring->buf_size;
199	int frags = skb_shinfo(skb)->nr_frags;
200	int i;
201	__be32 *ptr = (__be32 *)tx_desc;
202	__be32 stamp = cpu_to_be32(STAMP_VAL | (!!owner << STAMP_SHIFT));
203
204	/* Optimize the common case when there are no wraparounds */
205	if (likely((void *) tx_desc + tx_info->nr_txbb * TXBB_SIZE <= end)) {
206		if (tx_info->linear) {
207			pci_unmap_single(mdev->pdev,
208				(dma_addr_t) be64_to_cpu(data->addr),
209				 be32_to_cpu(data->byte_count),
210				 PCI_DMA_TODEVICE);
211			++data;
212		}
213
214		for (i = 0; i < frags; i++) {
215			frag = &skb_shinfo(skb)->frags[i];
216			pci_unmap_page(mdev->pdev,
217				(dma_addr_t) be64_to_cpu(data[i].addr),
218				frag->size, PCI_DMA_TODEVICE);
219		}
220		/* Stamp the freed descriptor */
221		for (i = 0; i < tx_info->nr_txbb * TXBB_SIZE; i += STAMP_STRIDE) {
222			*ptr = stamp;
223			ptr += STAMP_DWORDS;
224		}
225
226	} else {
		/* Descriptor wraps: re-anchor the segment pointer at the
		 * buffer start if data_offset already passed the end. */
227		if ((void *) data >= end) {
228			data = (struct mlx4_wqe_data_seg *)
229					(ring->buf + ((void *) data - end));
230		}

231
232		if (tx_info->linear) {
233			pci_unmap_single(mdev->pdev,
234				(dma_addr_t) be64_to_cpu(data->addr),
235				 be32_to_cpu(data->byte_count),
236				 PCI_DMA_TODEVICE);
237			++data;
238		}
239
240		for (i = 0; i < frags; i++) {
241			/* Check for wraparound before unmapping */
242			if ((void *) data >= end)
243				data = (struct mlx4_wqe_data_seg *) ring->buf;
244			frag = &skb_shinfo(skb)->frags[i];
245			pci_unmap_page(mdev->pdev,
246				(dma_addr_t) be64_to_cpu(data->addr),
247				 frag->size, PCI_DMA_TODEVICE);
			/* Fix: advance to the next data segment. Previously
			 * this loop never moved 'data', so every fragment
			 * unmapped the first segment's DMA address (the
			 * no-wrap branch above correctly uses data[i]). */
			++data;
248		}
249		/* Stamp the freed descriptor */
250		for (i = 0; i < tx_info->nr_txbb * TXBB_SIZE; i += STAMP_STRIDE) {
251			*ptr = stamp;
252			ptr += STAMP_DWORDS;
253			if ((void *) ptr >= end) {
254				ptr = ring->buf;
				/* The stamp value flips at the wrap point so
				 * the wrapped tail carries the other lap's
				 * ownership bit. */
255				stamp ^= cpu_to_be32(0x80000000);
256			}
257		}
258
259	}
260	dev_kfree_skb_any(skb);
261	return tx_info->nr_txbb;
}
263
264
/* Drop every descriptor still posted on @ring (used when the ring is
 * being torn down and the HW will not complete them). Returns the
 * number of descriptors freed; 0 and a warning if the cons/prod
 * indices are inconsistent. */
265int mlx4_en_free_tx_buf(struct net_device *dev, struct mlx4_en_tx_ring *ring)
266{
267	struct mlx4_en_priv *priv = netdev_priv(dev);
268	int cnt = 0;
269
270	/* Skip last polled descriptor */
271	ring->cons += ring->last_nr_txbb;
272	mlx4_dbg(DRV, priv, "Freeing Tx buf - cons:0x%x prod:0x%x\n",
273		 ring->cons, ring->prod);
274
	/* Sanity check: the unsigned difference can only exceed ring->size
	 * if the indices were corrupted. */
275	if ((u32) (ring->prod - ring->cons) > ring->size) {
276		if (netif_msg_tx_err(priv))
277			mlx4_warn(priv->mdev, "Tx consumer passed producer!\n");
278		return 0;
279	}
280
281	while (ring->cons != ring->prod) {
		/* The (cons & size) bit selects the ownership stamp for the
		 * current lap of the ring. */
282		ring->last_nr_txbb = mlx4_en_free_tx_desc(priv, ring,
283						ring->cons & ring->size_mask,
284						!!(ring->cons & ring->size));
285		ring->cons += ring->last_nr_txbb;
286		cnt++;
287	}
288
289	if (cnt)
290		mlx4_dbg(DRV, priv, "Freed %d uncompleted tx descriptors\n", cnt);
291
292	return cnt;
293}
294
/* Distribute the 8 VLAN priorities as evenly as possible over
 * @ring_num TX rings. With one ring every priority maps to ring 0;
 * otherwise rings are numbered from 1 and the first 'extra' rings
 * each receive one additional priority.
 * NOTE(review): ring_num == 0 would divide by zero below — presumably
 * callers guarantee at least one ring; verify at the call sites. */
295void mlx4_en_set_prio_map(struct mlx4_en_priv *priv, u16 *prio_map, u32 ring_num)
296{
297	int block = 8 / ring_num;
298	int extra = 8 - (block * ring_num);
299	int num = 0;
300	u16 ring = 1;
301	int prio;
302
303	if (ring_num == 1) {
304		for (prio = 0; prio < 8; prio++)
305			prio_map[prio] = 0;
306		return;
307	}
308
309	for (prio = 0; prio < 8; prio++) {
		/* While 'extra' remains, the current ring takes block+1
		 * priorities before advancing; afterwards exactly block. */
310		if (extra && (num == block + 1)) {
311			ring++;
312			num = 0;
313			extra--;
314		} else if (!extra && (num == block)) {
315			ring++;
316			num = 0;
317		}
318		prio_map[prio] = ring;
319		mlx4_dbg(DRV, priv, " prio:%d --> ring:%d\n", prio, ring);
320		num++;
321	}
322}
323
/* Reap TX completions for @cq: walk from the ring's consumer index up
 * to the HW-reported wqe_index, freeing each completed descriptor,
 * then update the CQ consumer index and (if needed) wake the netdev
 * queue that this ring had stopped.
 * Caller must hold ring->comp_lock. */
324static void mlx4_en_process_tx_cq(struct net_device *dev, struct mlx4_en_cq *cq)
325{
326	struct mlx4_en_priv *priv = netdev_priv(dev);
327	struct mlx4_cq *mcq = &cq->mcq;
328	struct mlx4_en_tx_ring *ring = &priv->tx_ring[cq->ring];
329	struct mlx4_cqe *cqe = cq->buf;
330	u16 index;
331	u16 new_index;
332	u32 txbbs_skipped = 0;
333	u32 cq_last_sav;
334
335	/* index always points to the first TXBB of the last polled descriptor */
336	index = ring->cons & ring->size_mask;
337	new_index = be16_to_cpu(cqe->wqe_index) & ring->size_mask;
338	if (index == new_index)
339		return;
340
341	if (!priv->port_up)
342		return;
343
344	/*
345	 * We use a two-stage loop:
346	 * - the first samples the HW-updated CQE
347	 * - the second frees TXBBs until the last sample
348	 * This lets us amortize CQE cache misses, while still polling the CQ
349	 * until it is quiescent.
350	 */
351	cq_last_sav = mcq->cons_index;
352	do {
353		do {
354			/* Skip over last polled CQE */
355			index = (index + ring->last_nr_txbb) & ring->size_mask;
356			txbbs_skipped += ring->last_nr_txbb;
357
358			/* Poll next CQE */
359			ring->last_nr_txbb = mlx4_en_free_tx_desc(
360						priv, ring, index,
361						!!((ring->cons + txbbs_skipped) &
362						   ring->size));
363			++mcq->cons_index;
364
365		} while (index != new_index);
366
		/* Re-sample: HW may have produced more completions while
		 * we were freeing descriptors. */
367		new_index = be16_to_cpu(cqe->wqe_index) & ring->size_mask;
368	} while (index != new_index);
369	AVG_PERF_COUNTER(priv->pstats.tx_coal_avg,
370			 (u32) (mcq->cons_index - cq_last_sav));
371
372	/*
373	 * To prevent CQ overflow we first update CQ consumer and only then
374	 * the ring consumer.
375	 */
376	mlx4_cq_set_ci(mcq);
377	wmb();
378	ring->cons += txbbs_skipped;
379
380	/* Wakeup Tx queue if this ring stopped it */
381	if (unlikely(ring->blocked)) {
382		if (((u32) (ring->prod - ring->cons) <=
383		     ring->size - HEADROOM - MAX_DESC_TXBBS) && !cq->armed) {
384
385			/* TODO: support multiqueue netdevs. Currently, we block
386			 * when *any* ring is full. Note that:
387			 * - 2 Tx rings can unblock at the same time and call
388			 *   netif_wake_queue(), which is OK since this
389			 *   operation is idempotent.
390			 * - We might wake the queue just after another ring
391			 *   stopped it. This is no big deal because the next
392			 *   transmission on that ring would stop the queue.
393			 */
394			ring->blocked = 0;
395			netif_wake_queue(dev);
396			priv->port_stats.wake_queue++;
397		}
398	}
399}
400
/* TX completion interrupt handler. Processes the CQ under the ring's
 * completion lock; if the ring is still blocked, re-arm the CQ so the
 * next completion interrupts us again, otherwise fall back to timer
 * polling on the next jiffy. */
401void mlx4_en_tx_irq(struct mlx4_cq *mcq)
402{
403	struct mlx4_en_cq *cq = container_of(mcq, struct mlx4_en_cq, mcq);
404	struct mlx4_en_priv *priv = netdev_priv(cq->dev);
405	struct mlx4_en_tx_ring *ring = &priv->tx_ring[cq->ring];
406
407	spin_lock_irq(&ring->comp_lock);
408	cq->armed = 0;
409	mlx4_en_process_tx_cq(cq->dev, cq);
410	if (ring->blocked)
411		mlx4_en_arm_cq(priv, cq);
412	else
413		mod_timer(&cq->timer, jiffies + 1);
414	spin_unlock_irq(&ring->comp_lock);
415}
416
417
/* Timer callback that polls the TX CQ when no interrupts are armed,
 * guaranteeing completion processing even if the interface goes idle. */
418void mlx4_en_poll_tx_cq(unsigned long data)
419{
420	struct mlx4_en_cq *cq = (struct mlx4_en_cq *) data;
421	struct mlx4_en_priv *priv = netdev_priv(cq->dev);
422	struct mlx4_en_tx_ring *ring = &priv->tx_ring[cq->ring];
423	u32 inflight;
424
425	INC_PERF_COUNTER(priv->pstats.tx_poll);
426
	/* Take the tx lock first, then the completion lock — same order
	 * as the xmit path to avoid deadlock. */
427	netif_tx_lock(priv->dev);
428	spin_lock_irq(&ring->comp_lock);
429	mlx4_en_process_tx_cq(cq->dev, cq);
430	inflight = (u32) (ring->prod - ring->cons - ring->last_nr_txbb);
431
432	/* If there are still packets in flight and the timer has not already
433	 * been scheduled by the Tx routine then schedule it here to guarantee
434	 * completion processing of these packets */
435	if (inflight && priv->port_up)
436		mod_timer(&cq->timer, jiffies + MLX4_EN_TX_POLL_TIMEOUT);
437
438	spin_unlock_irq(&ring->comp_lock);
439	netif_tx_unlock(priv->dev);
440}
441
/* Copy a descriptor that was built in the bounce buffer back into the
 * ring, split across the wrap point: 'copy' bytes go at the ring tail
 * (starting at @index), the remainder at the ring head. Both loops
 * copy backwards, with a barrier at each TXBB boundary, so the first
 * dword of the descriptor — the one holding the HW ownership bit — is
 * written last. The first dword itself (i down to 4, not 0) is left
 * for the caller, which sets owner_opcode after a final wmb().
 * Returns the descriptor's real location in the ring. */
442static struct mlx4_en_tx_desc *mlx4_en_bounce_to_desc(struct mlx4_en_priv *priv,
443						      struct mlx4_en_tx_ring *ring,
444						      u32 index,
445						      unsigned int desc_size)
446{
447	u32 copy = (ring->size - index) * TXBB_SIZE;
448	int i;
449
	/* Part 2 of the descriptor: wrapped to the start of the ring. */
450	for (i = desc_size - copy - 4; i >= 0; i -= 4) {
451		if ((i & (TXBB_SIZE - 1)) == 0)
452			wmb();
453
454		*((u32 *) (ring->buf + i)) =
455			*((u32 *) (ring->bounce_buf + copy + i));
456	}
457
	/* Part 1: tail of the ring, skipping the ownership dword (i >= 4). */
458	for (i = copy - 4; i >= 4 ; i -= 4) {
459		if ((i & (TXBB_SIZE - 1)) == 0)
460			wmb();
461
462		*((u32 *) (ring->buf + index * TXBB_SIZE + i)) =
463			*((u32 *) (ring->bounce_buf + i));
464	}
465
466	/* Return real descriptor location */
467	return ring->buf + index * TXBB_SIZE;
468}
469
/* Opportunistic completion reaping from the transmit path: arm the
 * poll timer if idle, and process the CQ inline once every
 * MLX4_EN_TX_POLL_MODER transmitted packets. */
470static inline void mlx4_en_xmit_poll(struct mlx4_en_priv *priv, int tx_ind)
471{
472	struct mlx4_en_cq *cq = &priv->tx_cq[tx_ind];
473	struct mlx4_en_tx_ring *ring = &priv->tx_ring[tx_ind];
474
475	/* If we don't have a pending timer, set one up to catch our recent
476	   post in case the interface becomes idle */
477	if (!timer_pending(&cq->timer))
478		mod_timer(&cq->timer, jiffies + MLX4_EN_TX_POLL_TIMEOUT);
479
480	/* Poll the CQ every MLX4_EN_TX_POLL_MODER packets */
481	if ((++ring->poll_cnt & (MLX4_EN_TX_POLL_MODER - 1)) == 0)
482		mlx4_en_process_tx_cq(priv->dev, cq);
483}
484
/* Return a kernel virtual address for the skb's first page fragment,
 * or NULL if the page has no direct mapping (e.g. highmem). Used to
 * copy small single-fragment packets inline. */
485static void *get_frag_ptr(struct sk_buff *skb)
486{
487	struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
488	struct page *page = frag->page;
489	void *ptr;
490
491	ptr = page_address(page);
492	if (unlikely(!ptr))
493		return NULL;
494
495	return ptr + frag->page_offset;
496}
497
/* Decide whether @skb should be copied inline into the descriptor:
 * non-GSO, total length within inline_thold, and at most one page
 * fragment whose address must be directly mappable. On success with a
 * single fragment, *pfrag (if non-NULL) receives the fragment's
 * virtual address. Returns 1 to inline, 0 to DMA-map. */
498static int is_inline(struct sk_buff *skb, void **pfrag)
499{
500	void *ptr;
501
502	if (inline_thold && !skb_is_gso(skb) && skb->len <= inline_thold) {
503		if (skb_shinfo(skb)->nr_frags == 1) {
504			ptr = get_frag_ptr(skb);
505			if (unlikely(!ptr))
506				return 0;
507
508			if (pfrag)
509				*pfrag = ptr;
510
511			return 1;
512		} else if (unlikely(skb_shinfo(skb)->nr_frags))
513			return 0;
514		else
515			return 1;
516	}
517
518	return 0;
519}
520
/* Size in bytes of an inline descriptor for @skb, rounded up to a
 * 16-byte unit. If the payload would cross the MLX4_INLINE_ALIGN
 * boundary it is split into two inline segments, so a second segment
 * header is accounted for. */
521static int inline_size(struct sk_buff *skb)
522{
523	if (skb->len + CTRL_SIZE + sizeof(struct mlx4_wqe_inline_seg)
524	    <= MLX4_INLINE_ALIGN)
525		return ALIGN(skb->len + CTRL_SIZE +
526			     sizeof(struct mlx4_wqe_inline_seg), 16);
527	else
528		return ALIGN(skb->len + CTRL_SIZE + 2 *
529			     sizeof(struct mlx4_wqe_inline_seg), 16);
530}
531
/* Compute the exact descriptor size in bytes needed to send @skb,
 * storing the LSO header length (0 for non-GSO) in *lso_header_size.
 * Returns 0 — after freeing the skb — when the packet cannot be sent
 * (non-linear LSO headers, or headers larger than MAX_LSO_HDR_SIZE). */
532static int get_real_size(struct sk_buff *skb, struct net_device *dev,
533			 int *lso_header_size)
534{
535	struct mlx4_en_priv *priv = netdev_priv(dev);
536	struct mlx4_en_dev *mdev = priv->mdev;
537	int real_size;
538
539	if (skb_is_gso(skb)) {
540		*lso_header_size = skb_transport_offset(skb) + tcp_hdrlen(skb);
541		real_size = CTRL_SIZE + skb_shinfo(skb)->nr_frags * DS_SIZE +
542			ALIGN(*lso_header_size + 4, DS_SIZE);
543		if (unlikely(*lso_header_size != skb_headlen(skb))) {
544			/* We add a segment for the skb linear buffer only if
545			 * it contains data */
546			if (*lso_header_size < skb_headlen(skb))
547				real_size += DS_SIZE;
548			else {
549				if (netif_msg_tx_err(priv))
550					mlx4_warn(mdev, "Non-linear headers\n");
				/* Drop: LSO headers must live in the linear area. */
551				dev_kfree_skb_any(skb);
552				return 0;
553			}
554		}
555		if (unlikely(*lso_header_size > MAX_LSO_HDR_SIZE)) {
556			if (netif_msg_tx_err(priv))
557				mlx4_warn(mdev, "LSO header size too big\n");
558			dev_kfree_skb_any(skb);
559			return 0;
560		}
561	} else {
562		*lso_header_size = 0;
563		if (!is_inline(skb, NULL))
564			real_size = CTRL_SIZE + (skb_shinfo(skb)->nr_frags + 1) * DS_SIZE;
565		else
566			real_size = inline_size(skb);
567	}
568
569	return real_size;
570}
571
/* Copy @skb's data inline into the descriptor. Payloads that fit in
 * the space before the MLX4_INLINE_ALIGN boundary ('spc') use a single
 * inline segment; larger ones are split into two segments, with the
 * second segment's byte_count written only after a barrier so the HW
 * never sees it valid before its data. @fragptr is the mapped address
 * of the single page fragment, if any (from is_inline). */
572static void build_inline_wqe(struct mlx4_en_tx_desc *tx_desc, struct sk_buff *skb,
573			     int real_size, u16 *vlan_tag, int tx_ind, void *fragptr)
574{
575	struct mlx4_wqe_inline_seg *inl = &tx_desc->inl;
576	int spc = MLX4_INLINE_ALIGN - CTRL_SIZE - sizeof *inl;
577
578	if (skb->len <= spc) {
		/* Bit 31 of byte_count marks the segment as inline. */
579		inl->byte_count = cpu_to_be32(1 << 31 | skb->len);
580		skb_copy_from_linear_data(skb, inl + 1, skb_headlen(skb));
581		if (skb_shinfo(skb)->nr_frags)
582			memcpy(((void *)(inl + 1)) + skb_headlen(skb), fragptr,
583			       skb_shinfo(skb)->frags[0].size);
584
585	} else {
586		inl->byte_count = cpu_to_be32(1 << 31 | spc);
587		if (skb_headlen(skb) <= spc) {
588			skb_copy_from_linear_data(skb, inl + 1, skb_headlen(skb));
589			if (skb_headlen(skb) < spc) {
590				memcpy(((void *)(inl + 1)) + skb_headlen(skb),
591					fragptr, spc - skb_headlen(skb));
592				fragptr +=  spc - skb_headlen(skb);
593			}
594			inl = (void *) (inl + 1) + spc;
595			memcpy(((void *)(inl + 1)), fragptr, skb->len - spc);
596		} else {
597			skb_copy_from_linear_data(skb, inl + 1, spc);
598			inl = (void *) (inl + 1) + spc;
599			skb_copy_from_linear_data_offset(skb, spc, inl + 1,
600					skb_headlen(skb) - spc);
601			if (skb_shinfo(skb)->nr_frags)
602				memcpy(((void *)(inl + 1)) + skb_headlen(skb) - spc,
603					fragptr, skb_shinfo(skb)->frags[0].size);
604		}
605
		/* Ensure second-segment data is visible before its length. */
606		wmb();
607		inl->byte_count = cpu_to_be32(1 << 31 | (skb->len - spc));
608	}
609	tx_desc->ctrl.vlan_tag = cpu_to_be16(*vlan_tag);
610	tx_desc->ctrl.ins_vlan = MLX4_WQE_CTRL_INS_VLAN * !!(*vlan_tag);
611	tx_desc->ctrl.fence_size = (real_size / 16) & 0x3f;
612}
613
/* Extract the skb's VLAN tag into *vlan_tag (0 if none) and return
 * the TX ring index chosen from the tag's 3-bit priority field via
 * the priority map; untagged traffic uses ring 0. */
614static int get_vlan_info(struct mlx4_en_priv *priv, struct sk_buff *skb,
615			 u16 *vlan_tag)
616{
617	int tx_ind;
618
619	/* Obtain VLAN information if present */
620	if (priv->vlgrp && vlan_tx_tag_present(skb)) {
621		*vlan_tag = vlan_tx_tag_get(skb);
622		/* Set the Tx ring to use according to vlan priority */
623		tx_ind = priv->tx_prio_map[*vlan_tag >> 13];
624	} else {
625		*vlan_tag = 0;
626		tx_ind = 0;
627	}
628	return tx_ind;
629}
630
/* hard_start_xmit: build and post one send descriptor for @skb.
 * Chooses a ring by VLAN priority, stops the queue (NETDEV_TX_BUSY)
 * when the ring lacks room, then fills the control segment, the LSO
 * or normal data segments (DMA-mapped in reverse, or copied inline),
 * hands ownership to HW, and rings the doorbell. Returns 0
 * (NETDEV_TX_OK) in all consuming paths. */
631int mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
632{
633	struct mlx4_en_priv *priv = netdev_priv(dev);
634	struct mlx4_en_dev *mdev = priv->mdev;
635	struct mlx4_en_tx_ring *ring;
636	struct mlx4_en_cq *cq;
637	struct mlx4_en_tx_desc *tx_desc;
638	struct mlx4_wqe_data_seg *data;
639	struct skb_frag_struct *frag;
640	struct mlx4_en_tx_info *tx_info;
641	int tx_ind = 0;
642	int nr_txbb;
643	int desc_size;
644	int real_size;
645	dma_addr_t dma;
646	u32 index;
647	__be32 op_own;
648	u16 vlan_tag;
649	int i;
650	int lso_header_size;
651	void *fragptr;
652
653	if (unlikely(!skb->len)) {
654		dev_kfree_skb_any(skb);
655		return NETDEV_TX_OK;
656	}
	/* get_real_size() frees the skb itself when it returns 0. */
657	real_size = get_real_size(skb, dev, &lso_header_size);
658	if (unlikely(!real_size))
659		return NETDEV_TX_OK;
660
661	/* Align descriptor to TXBB size */
662	desc_size = ALIGN(real_size, TXBB_SIZE);
663	nr_txbb = desc_size / TXBB_SIZE;
664	if (unlikely(nr_txbb > MAX_DESC_TXBBS)) {
665		if (netif_msg_tx_err(priv))
666			mlx4_warn(mdev, "Oversized header or SG list\n");
667		dev_kfree_skb_any(skb);
668		return NETDEV_TX_OK;
669	}
670
671	tx_ind = get_vlan_info(priv, skb, &vlan_tag);
672	ring = &priv->tx_ring[tx_ind];
673
674	/* Check available TXBBs And 2K spare for prefetch */
675	if (unlikely(((int)(ring->prod - ring->cons)) >
676		     ring->size - HEADROOM - MAX_DESC_TXBBS)) {
677		/* every full Tx ring stops queue.
678		 * TODO: implement multi-queue support (per-queue stop) */
679		netif_stop_queue(dev);
680		ring->blocked = 1;
681		priv->port_stats.queue_stopped++;
682
683		/* Use interrupts to find out when queue opened */
684		cq = &priv->tx_cq[tx_ind];
685		mlx4_en_arm_cq(priv, cq);
686		return NETDEV_TX_BUSY;
687	}
688
689	/* Now that we know what Tx ring to use */
690	if (unlikely(!priv->port_up)) {
691		if (netif_msg_tx_err(priv))
692			mlx4_warn(mdev, "xmit: port down!\n");
693		dev_kfree_skb_any(skb);
694		return NETDEV_TX_OK;
695	}
696
697	/* Track current inflight packets for performance analysis */
698	AVG_PERF_COUNTER(priv->pstats.inflight_avg,
699			 (u32) (ring->prod - ring->cons - 1));
700
701	/* Packet is good - grab an index and transmit it */
702	index = ring->prod & ring->size_mask;
703
704	/* See if we have enough space for whole descriptor TXBB for setting
705	 * SW ownership on next descriptor; if not, use a bounce buffer. */
706	if (likely(index + nr_txbb <= ring->size))
707		tx_desc = ring->buf + index * TXBB_SIZE;
708	else
709		tx_desc = (struct mlx4_en_tx_desc *) ring->bounce_buf;
710
711	/* Save skb in tx_info ring */
712	tx_info = &ring->tx_info[index];
713	tx_info->skb = skb;
714	tx_info->nr_txbb = nr_txbb;
715
716	/* Prepare ctrl segment apart from opcode+ownership, which depends on
717	 * whether LSO is used */
718	tx_desc->ctrl.vlan_tag = cpu_to_be16(vlan_tag);
719	tx_desc->ctrl.ins_vlan = MLX4_WQE_CTRL_INS_VLAN * !!vlan_tag;
720	tx_desc->ctrl.fence_size = (real_size / 16) & 0x3f;
721	tx_desc->ctrl.srcrb_flags = cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE |
722						MLX4_WQE_CTRL_SOLICITED);
723	if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
724		tx_desc->ctrl.srcrb_flags |= cpu_to_be32(MLX4_WQE_CTRL_IP_CSUM |
725							 MLX4_WQE_CTRL_TCP_UDP_CSUM);
726		priv->port_stats.tx_chksum_offload++;
727	}
728
729	/* Handle LSO (TSO) packets */
730	if (lso_header_size) {
731		/* Mark opcode as LSO */
732		op_own = cpu_to_be32(MLX4_OPCODE_LSO | (1 << 6)) |
733			((ring->prod & ring->size) ?
734				cpu_to_be32(MLX4_EN_BIT_DESC_OWN) : 0);
735
736		/* Fill in the LSO prefix */
737		tx_desc->lso.mss_hdr_size = cpu_to_be32(
738			skb_shinfo(skb)->gso_size << 16 | lso_header_size);
739
740		/* Copy headers;
741		 * note that we already verified that it is linear */
742		memcpy(tx_desc->lso.header, skb->data, lso_header_size);
743		data = ((void *) &tx_desc->lso +
744			ALIGN(lso_header_size + 4, DS_SIZE));
745
746		priv->port_stats.tso_packets++;
		/* Count resulting wire segments; headers repeat per segment. */
747		i = ((skb->len - lso_header_size) / skb_shinfo(skb)->gso_size) +
748			!!((skb->len - lso_header_size) % skb_shinfo(skb)->gso_size);
749		ring->bytes += skb->len + (i - 1) * lso_header_size;
750		ring->packets += i;
751	} else {
752		/* Normal (Non LSO) packet */
753		op_own = cpu_to_be32(MLX4_OPCODE_SEND) |
754			((ring->prod & ring->size) ?
755				cpu_to_be32(MLX4_EN_BIT_DESC_OWN) : 0);
756		data = &tx_desc->data;
757		ring->bytes += max(skb->len, (unsigned int) ETH_ZLEN);
758		ring->packets++;
759
760	}
761	AVG_PERF_COUNTER(priv->pstats.tx_pktsz_avg, skb->len);
762
763
764	/* valid only for none inline segments */
765	tx_info->data_offset = (void *) data - (void *) tx_desc;
766
767	tx_info->linear = (lso_header_size < skb_headlen(skb) && !is_inline(skb, NULL)) ? 1 : 0;
	/* Point at the LAST data segment; segments are filled in reverse
	 * so byte_count (the HW validity marker) is written last. */
768	data += skb_shinfo(skb)->nr_frags + tx_info->linear - 1;
769
770	if (!is_inline(skb, &fragptr)) {
		/* NOTE(review): mapping uses mdev->dev->pdev while the free
		 * path unmaps with mdev->pdev — presumably the same device;
		 * verify. */
771		/* Map fragments */
772		for (i = skb_shinfo(skb)->nr_frags - 1; i >= 0; i--) {
773			frag = &skb_shinfo(skb)->frags[i];
774			dma = pci_map_page(mdev->dev->pdev, frag->page, frag->page_offset,
775					   frag->size, PCI_DMA_TODEVICE);
776			data->addr = cpu_to_be64(dma);
777			data->lkey = cpu_to_be32(mdev->mr.key);
778			wmb();
779			data->byte_count = cpu_to_be32(frag->size);
780			--data;
781		}
782
783		/* Map linear part */
784		if (tx_info->linear) {
785			dma = pci_map_single(mdev->dev->pdev, skb->data + lso_header_size,
786					     skb_headlen(skb) - lso_header_size, PCI_DMA_TODEVICE);
787			data->addr = cpu_to_be64(dma);
788			data->lkey = cpu_to_be32(mdev->mr.key);
789			wmb();
790			data->byte_count = cpu_to_be32(skb_headlen(skb) - lso_header_size);
791		}
792	} else
793		build_inline_wqe(tx_desc, skb, real_size, &vlan_tag, tx_ind, fragptr);
794
795	ring->prod += nr_txbb;
796
797	/* If we used a bounce buffer then copy descriptor back into place */
798	if (tx_desc == (struct mlx4_en_tx_desc *) ring->bounce_buf)
799		tx_desc = mlx4_en_bounce_to_desc(priv, ring, index, desc_size);
800
801	/* Run destructor before passing skb to HW */
802	if (likely(!skb_shared(skb)))
803		skb_orphan(skb);
804
805	/* Ensure new descriptor hits memory
806	* before setting ownership of this descriptor to HW */
807	wmb();
808	tx_desc->ctrl.owner_opcode = op_own;
809
810	/* Ring doorbell! */
811	wmb();
812	writel(ring->doorbell_qpn, mdev->uar_map + MLX4_SEND_DOORBELL);
813	dev->trans_start = jiffies;
814
815	/* Poll CQ here */
816	mlx4_en_xmit_poll(priv, tx_ind);
817
	/* 0 == NETDEV_TX_OK */
818	return 0;
819}
820
diff --git a/drivers/net/mlx4/eq.c b/drivers/net/mlx4/eq.c
index 8a8b56135a58..de169338cd90 100644
--- a/drivers/net/mlx4/eq.c
+++ b/drivers/net/mlx4/eq.c
@@ -558,7 +558,7 @@ int mlx4_init_eq_table(struct mlx4_dev *dev)
558 int i; 558 int i;
559 559
560 err = mlx4_bitmap_init(&priv->eq_table.bitmap, dev->caps.num_eqs, 560 err = mlx4_bitmap_init(&priv->eq_table.bitmap, dev->caps.num_eqs,
561 dev->caps.num_eqs - 1, dev->caps.reserved_eqs); 561 dev->caps.num_eqs - 1, dev->caps.reserved_eqs, 0);
562 if (err) 562 if (err)
563 return err; 563 return err;
564 564
diff --git a/drivers/net/mlx4/fw.c b/drivers/net/mlx4/fw.c
index 7e32955da982..cee199ceba2f 100644
--- a/drivers/net/mlx4/fw.c
+++ b/drivers/net/mlx4/fw.c
@@ -88,6 +88,7 @@ static void dump_dev_cap_flags(struct mlx4_dev *dev, u32 flags)
88 [ 8] = "P_Key violation counter", 88 [ 8] = "P_Key violation counter",
89 [ 9] = "Q_Key violation counter", 89 [ 9] = "Q_Key violation counter",
90 [10] = "VMM", 90 [10] = "VMM",
91 [12] = "DPDP",
91 [16] = "MW support", 92 [16] = "MW support",
92 [17] = "APM support", 93 [17] = "APM support",
93 [18] = "Atomic ops support", 94 [18] = "Atomic ops support",
@@ -346,7 +347,7 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
346 MLX4_GET(field, outbox, QUERY_DEV_CAP_VL_PORT_OFFSET); 347 MLX4_GET(field, outbox, QUERY_DEV_CAP_VL_PORT_OFFSET);
347 dev_cap->max_vl[i] = field >> 4; 348 dev_cap->max_vl[i] = field >> 4;
348 MLX4_GET(field, outbox, QUERY_DEV_CAP_MTU_WIDTH_OFFSET); 349 MLX4_GET(field, outbox, QUERY_DEV_CAP_MTU_WIDTH_OFFSET);
349 dev_cap->max_mtu[i] = field >> 4; 350 dev_cap->ib_mtu[i] = field >> 4;
350 dev_cap->max_port_width[i] = field & 0xf; 351 dev_cap->max_port_width[i] = field & 0xf;
351 MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_GID_OFFSET); 352 MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_GID_OFFSET);
352 dev_cap->max_gids[i] = 1 << (field & 0xf); 353 dev_cap->max_gids[i] = 1 << (field & 0xf);
@@ -354,10 +355,14 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
354 dev_cap->max_pkeys[i] = 1 << (field & 0xf); 355 dev_cap->max_pkeys[i] = 1 << (field & 0xf);
355 } 356 }
356 } else { 357 } else {
358#define QUERY_PORT_SUPPORTED_TYPE_OFFSET 0x00
357#define QUERY_PORT_MTU_OFFSET 0x01 359#define QUERY_PORT_MTU_OFFSET 0x01
360#define QUERY_PORT_ETH_MTU_OFFSET 0x02
358#define QUERY_PORT_WIDTH_OFFSET 0x06 361#define QUERY_PORT_WIDTH_OFFSET 0x06
359#define QUERY_PORT_MAX_GID_PKEY_OFFSET 0x07 362#define QUERY_PORT_MAX_GID_PKEY_OFFSET 0x07
363#define QUERY_PORT_MAX_MACVLAN_OFFSET 0x0a
360#define QUERY_PORT_MAX_VL_OFFSET 0x0b 364#define QUERY_PORT_MAX_VL_OFFSET 0x0b
365#define QUERY_PORT_MAC_OFFSET 0x10
361 366
362 for (i = 1; i <= dev_cap->num_ports; ++i) { 367 for (i = 1; i <= dev_cap->num_ports; ++i) {
363 err = mlx4_cmd_box(dev, 0, mailbox->dma, i, 0, MLX4_CMD_QUERY_PORT, 368 err = mlx4_cmd_box(dev, 0, mailbox->dma, i, 0, MLX4_CMD_QUERY_PORT,
@@ -365,8 +370,10 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
365 if (err) 370 if (err)
366 goto out; 371 goto out;
367 372
373 MLX4_GET(field, outbox, QUERY_PORT_SUPPORTED_TYPE_OFFSET);
374 dev_cap->supported_port_types[i] = field & 3;
368 MLX4_GET(field, outbox, QUERY_PORT_MTU_OFFSET); 375 MLX4_GET(field, outbox, QUERY_PORT_MTU_OFFSET);
369 dev_cap->max_mtu[i] = field & 0xf; 376 dev_cap->ib_mtu[i] = field & 0xf;
370 MLX4_GET(field, outbox, QUERY_PORT_WIDTH_OFFSET); 377 MLX4_GET(field, outbox, QUERY_PORT_WIDTH_OFFSET);
371 dev_cap->max_port_width[i] = field & 0xf; 378 dev_cap->max_port_width[i] = field & 0xf;
372 MLX4_GET(field, outbox, QUERY_PORT_MAX_GID_PKEY_OFFSET); 379 MLX4_GET(field, outbox, QUERY_PORT_MAX_GID_PKEY_OFFSET);
@@ -374,6 +381,11 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
374 dev_cap->max_pkeys[i] = 1 << (field & 0xf); 381 dev_cap->max_pkeys[i] = 1 << (field & 0xf);
375 MLX4_GET(field, outbox, QUERY_PORT_MAX_VL_OFFSET); 382 MLX4_GET(field, outbox, QUERY_PORT_MAX_VL_OFFSET);
376 dev_cap->max_vl[i] = field & 0xf; 383 dev_cap->max_vl[i] = field & 0xf;
384 MLX4_GET(field, outbox, QUERY_PORT_MAX_MACVLAN_OFFSET);
385 dev_cap->log_max_macs[i] = field & 0xf;
386 dev_cap->log_max_vlans[i] = field >> 4;
387 MLX4_GET(dev_cap->eth_mtu[i], outbox, QUERY_PORT_ETH_MTU_OFFSET);
388 MLX4_GET(dev_cap->def_mac[i], outbox, QUERY_PORT_MAC_OFFSET);
377 } 389 }
378 } 390 }
379 391
@@ -407,7 +419,7 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
407 mlx4_dbg(dev, "Max CQEs: %d, max WQEs: %d, max SRQ WQEs: %d\n", 419 mlx4_dbg(dev, "Max CQEs: %d, max WQEs: %d, max SRQ WQEs: %d\n",
408 dev_cap->max_cq_sz, dev_cap->max_qp_sz, dev_cap->max_srq_sz); 420 dev_cap->max_cq_sz, dev_cap->max_qp_sz, dev_cap->max_srq_sz);
409 mlx4_dbg(dev, "Local CA ACK delay: %d, max MTU: %d, port width cap: %d\n", 421 mlx4_dbg(dev, "Local CA ACK delay: %d, max MTU: %d, port width cap: %d\n",
410 dev_cap->local_ca_ack_delay, 128 << dev_cap->max_mtu[1], 422 dev_cap->local_ca_ack_delay, 128 << dev_cap->ib_mtu[1],
411 dev_cap->max_port_width[1]); 423 dev_cap->max_port_width[1]);
412 mlx4_dbg(dev, "Max SQ desc size: %d, max SQ S/G: %d\n", 424 mlx4_dbg(dev, "Max SQ desc size: %d, max SQ S/G: %d\n",
413 dev_cap->max_sq_desc_sz, dev_cap->max_sq_sg); 425 dev_cap->max_sq_desc_sz, dev_cap->max_sq_sg);
@@ -819,7 +831,7 @@ int mlx4_INIT_PORT(struct mlx4_dev *dev, int port)
819 flags |= (dev->caps.port_width_cap[port] & 0xf) << INIT_PORT_PORT_WIDTH_SHIFT; 831 flags |= (dev->caps.port_width_cap[port] & 0xf) << INIT_PORT_PORT_WIDTH_SHIFT;
820 MLX4_PUT(inbox, flags, INIT_PORT_FLAGS_OFFSET); 832 MLX4_PUT(inbox, flags, INIT_PORT_FLAGS_OFFSET);
821 833
822 field = 128 << dev->caps.mtu_cap[port]; 834 field = 128 << dev->caps.ib_mtu_cap[port];
823 MLX4_PUT(inbox, field, INIT_PORT_MTU_OFFSET); 835 MLX4_PUT(inbox, field, INIT_PORT_MTU_OFFSET);
824 field = dev->caps.gid_table_len[port]; 836 field = dev->caps.gid_table_len[port];
825 MLX4_PUT(inbox, field, INIT_PORT_MAX_GID_OFFSET); 837 MLX4_PUT(inbox, field, INIT_PORT_MAX_GID_OFFSET);
diff --git a/drivers/net/mlx4/fw.h b/drivers/net/mlx4/fw.h
index decbb5c2ad41..526d7f30c041 100644
--- a/drivers/net/mlx4/fw.h
+++ b/drivers/net/mlx4/fw.h
@@ -66,11 +66,13 @@ struct mlx4_dev_cap {
66 int local_ca_ack_delay; 66 int local_ca_ack_delay;
67 int num_ports; 67 int num_ports;
68 u32 max_msg_sz; 68 u32 max_msg_sz;
69 int max_mtu[MLX4_MAX_PORTS + 1]; 69 int ib_mtu[MLX4_MAX_PORTS + 1];
70 int max_port_width[MLX4_MAX_PORTS + 1]; 70 int max_port_width[MLX4_MAX_PORTS + 1];
71 int max_vl[MLX4_MAX_PORTS + 1]; 71 int max_vl[MLX4_MAX_PORTS + 1];
72 int max_gids[MLX4_MAX_PORTS + 1]; 72 int max_gids[MLX4_MAX_PORTS + 1];
73 int max_pkeys[MLX4_MAX_PORTS + 1]; 73 int max_pkeys[MLX4_MAX_PORTS + 1];
74 u64 def_mac[MLX4_MAX_PORTS + 1];
75 u16 eth_mtu[MLX4_MAX_PORTS + 1];
74 u16 stat_rate_support; 76 u16 stat_rate_support;
75 u32 flags; 77 u32 flags;
76 int reserved_uars; 78 int reserved_uars;
@@ -102,6 +104,9 @@ struct mlx4_dev_cap {
102 u32 reserved_lkey; 104 u32 reserved_lkey;
103 u64 max_icm_sz; 105 u64 max_icm_sz;
104 int max_gso_sz; 106 int max_gso_sz;
107 u8 supported_port_types[MLX4_MAX_PORTS + 1];
108 u8 log_max_macs[MLX4_MAX_PORTS + 1];
109 u8 log_max_vlans[MLX4_MAX_PORTS + 1];
105}; 110};
106 111
107struct mlx4_adapter { 112struct mlx4_adapter {
diff --git a/drivers/net/mlx4/main.c b/drivers/net/mlx4/main.c
index 1252a919de2e..90a0281d15ea 100644
--- a/drivers/net/mlx4/main.c
+++ b/drivers/net/mlx4/main.c
@@ -85,6 +85,57 @@ static struct mlx4_profile default_profile = {
85 .num_mtt = 1 << 20, 85 .num_mtt = 1 << 20,
86}; 86};
87 87
88static int log_num_mac = 2;
89module_param_named(log_num_mac, log_num_mac, int, 0444);
90MODULE_PARM_DESC(log_num_mac, "Log2 max number of MACs per ETH port (1-7)");
91
92static int log_num_vlan;
93module_param_named(log_num_vlan, log_num_vlan, int, 0444);
94MODULE_PARM_DESC(log_num_vlan, "Log2 max number of VLANs per ETH port (0-7)");
95
96static int use_prio;
97module_param_named(use_prio, use_prio, bool, 0444);
98MODULE_PARM_DESC(use_prio, "Enable steering by VLAN priority on ETH ports "
99 "(0/1, default 0)");
100
101static int mlx4_check_port_params(struct mlx4_dev *dev,
102 enum mlx4_port_type *port_type)
103{
104 int i;
105
106 for (i = 0; i < dev->caps.num_ports - 1; i++) {
107 if (port_type[i] != port_type[i+1] &&
108 !(dev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP)) {
109 mlx4_err(dev, "Only same port types supported "
110 "on this HCA, aborting.\n");
111 return -EINVAL;
112 }
113 }
114 if ((port_type[0] == MLX4_PORT_TYPE_ETH) &&
115 (port_type[1] == MLX4_PORT_TYPE_IB)) {
116 mlx4_err(dev, "eth-ib configuration is not supported.\n");
117 return -EINVAL;
118 }
119
120 for (i = 0; i < dev->caps.num_ports; i++) {
121 if (!(port_type[i] & dev->caps.supported_type[i+1])) {
122 mlx4_err(dev, "Requested port type for port %d is not "
123 "supported on this HCA\n", i + 1);
124 return -EINVAL;
125 }
126 }
127 return 0;
128}
129
130static void mlx4_set_port_mask(struct mlx4_dev *dev)
131{
132 int i;
133
134 dev->caps.port_mask = 0;
135 for (i = 1; i <= dev->caps.num_ports; ++i)
136 if (dev->caps.port_type[i] == MLX4_PORT_TYPE_IB)
137 dev->caps.port_mask |= 1 << (i - 1);
138}
88static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) 139static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
89{ 140{
90 int err; 141 int err;
@@ -120,10 +171,13 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
120 dev->caps.num_ports = dev_cap->num_ports; 171 dev->caps.num_ports = dev_cap->num_ports;
121 for (i = 1; i <= dev->caps.num_ports; ++i) { 172 for (i = 1; i <= dev->caps.num_ports; ++i) {
122 dev->caps.vl_cap[i] = dev_cap->max_vl[i]; 173 dev->caps.vl_cap[i] = dev_cap->max_vl[i];
123 dev->caps.mtu_cap[i] = dev_cap->max_mtu[i]; 174 dev->caps.ib_mtu_cap[i] = dev_cap->ib_mtu[i];
124 dev->caps.gid_table_len[i] = dev_cap->max_gids[i]; 175 dev->caps.gid_table_len[i] = dev_cap->max_gids[i];
125 dev->caps.pkey_table_len[i] = dev_cap->max_pkeys[i]; 176 dev->caps.pkey_table_len[i] = dev_cap->max_pkeys[i];
126 dev->caps.port_width_cap[i] = dev_cap->max_port_width[i]; 177 dev->caps.port_width_cap[i] = dev_cap->max_port_width[i];
178 dev->caps.eth_mtu_cap[i] = dev_cap->eth_mtu[i];
179 dev->caps.def_mac[i] = dev_cap->def_mac[i];
180 dev->caps.supported_type[i] = dev_cap->supported_port_types[i];
127 } 181 }
128 182
129 dev->caps.num_uars = dev_cap->uar_size / PAGE_SIZE; 183 dev->caps.num_uars = dev_cap->uar_size / PAGE_SIZE;
@@ -134,7 +188,6 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
134 dev->caps.max_rq_sg = dev_cap->max_rq_sg; 188 dev->caps.max_rq_sg = dev_cap->max_rq_sg;
135 dev->caps.max_wqes = dev_cap->max_qp_sz; 189 dev->caps.max_wqes = dev_cap->max_qp_sz;
136 dev->caps.max_qp_init_rdma = dev_cap->max_requester_per_qp; 190 dev->caps.max_qp_init_rdma = dev_cap->max_requester_per_qp;
137 dev->caps.reserved_qps = dev_cap->reserved_qps;
138 dev->caps.max_srq_wqes = dev_cap->max_srq_sz; 191 dev->caps.max_srq_wqes = dev_cap->max_srq_sz;
139 dev->caps.max_srq_sge = dev_cap->max_rq_sg - 1; 192 dev->caps.max_srq_sge = dev_cap->max_rq_sg - 1;
140 dev->caps.reserved_srqs = dev_cap->reserved_srqs; 193 dev->caps.reserved_srqs = dev_cap->reserved_srqs;
@@ -163,9 +216,138 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
163 dev->caps.stat_rate_support = dev_cap->stat_rate_support; 216 dev->caps.stat_rate_support = dev_cap->stat_rate_support;
164 dev->caps.max_gso_sz = dev_cap->max_gso_sz; 217 dev->caps.max_gso_sz = dev_cap->max_gso_sz;
165 218
219 dev->caps.log_num_macs = log_num_mac;
220 dev->caps.log_num_vlans = log_num_vlan;
221 dev->caps.log_num_prios = use_prio ? 3 : 0;
222
223 for (i = 1; i <= dev->caps.num_ports; ++i) {
224 if (dev->caps.supported_type[i] != MLX4_PORT_TYPE_ETH)
225 dev->caps.port_type[i] = MLX4_PORT_TYPE_IB;
226 else
227 dev->caps.port_type[i] = MLX4_PORT_TYPE_ETH;
228
229 if (dev->caps.log_num_macs > dev_cap->log_max_macs[i]) {
230 dev->caps.log_num_macs = dev_cap->log_max_macs[i];
231 mlx4_warn(dev, "Requested number of MACs is too much "
232 "for port %d, reducing to %d.\n",
233 i, 1 << dev->caps.log_num_macs);
234 }
235 if (dev->caps.log_num_vlans > dev_cap->log_max_vlans[i]) {
236 dev->caps.log_num_vlans = dev_cap->log_max_vlans[i];
237 mlx4_warn(dev, "Requested number of VLANs is too much "
238 "for port %d, reducing to %d.\n",
239 i, 1 << dev->caps.log_num_vlans);
240 }
241 }
242
243 mlx4_set_port_mask(dev);
244
245 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW] = dev_cap->reserved_qps;
246 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_ETH_ADDR] =
247 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_ADDR] =
248 (1 << dev->caps.log_num_macs) *
249 (1 << dev->caps.log_num_vlans) *
250 (1 << dev->caps.log_num_prios) *
251 dev->caps.num_ports;
252 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_EXCH] = MLX4_NUM_FEXCH;
253
254 dev->caps.reserved_qps = dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW] +
255 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_ETH_ADDR] +
256 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_ADDR] +
257 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_EXCH];
258
166 return 0; 259 return 0;
167} 260}
168 261
262/*
263 * Change the port configuration of the device.
264 * Every user of this function must hold the port mutex.
265 */
266static int mlx4_change_port_types(struct mlx4_dev *dev,
267 enum mlx4_port_type *port_types)
268{
269 int err = 0;
270 int change = 0;
271 int port;
272
273 for (port = 0; port < dev->caps.num_ports; port++) {
274 if (port_types[port] != dev->caps.port_type[port + 1]) {
275 change = 1;
276 dev->caps.port_type[port + 1] = port_types[port];
277 }
278 }
279 if (change) {
280 mlx4_unregister_device(dev);
281 for (port = 1; port <= dev->caps.num_ports; port++) {
282 mlx4_CLOSE_PORT(dev, port);
283 err = mlx4_SET_PORT(dev, port);
284 if (err) {
285 mlx4_err(dev, "Failed to set port %d, "
286 "aborting\n", port);
287 goto out;
288 }
289 }
290 mlx4_set_port_mask(dev);
291 err = mlx4_register_device(dev);
292 }
293
294out:
295 return err;
296}
297
298static ssize_t show_port_type(struct device *dev,
299 struct device_attribute *attr,
300 char *buf)
301{
302 struct mlx4_port_info *info = container_of(attr, struct mlx4_port_info,
303 port_attr);
304 struct mlx4_dev *mdev = info->dev;
305
306 return sprintf(buf, "%s\n",
307 mdev->caps.port_type[info->port] == MLX4_PORT_TYPE_IB ?
308 "ib" : "eth");
309}
310
311static ssize_t set_port_type(struct device *dev,
312 struct device_attribute *attr,
313 const char *buf, size_t count)
314{
315 struct mlx4_port_info *info = container_of(attr, struct mlx4_port_info,
316 port_attr);
317 struct mlx4_dev *mdev = info->dev;
318 struct mlx4_priv *priv = mlx4_priv(mdev);
319 enum mlx4_port_type types[MLX4_MAX_PORTS];
320 int i;
321 int err = 0;
322
323 if (!strcmp(buf, "ib\n"))
324 info->tmp_type = MLX4_PORT_TYPE_IB;
325 else if (!strcmp(buf, "eth\n"))
326 info->tmp_type = MLX4_PORT_TYPE_ETH;
327 else {
328 mlx4_err(mdev, "%s is not supported port type\n", buf);
329 return -EINVAL;
330 }
331
332 mutex_lock(&priv->port_mutex);
333 for (i = 0; i < mdev->caps.num_ports; i++)
334 types[i] = priv->port[i+1].tmp_type ? priv->port[i+1].tmp_type :
335 mdev->caps.port_type[i+1];
336
337 err = mlx4_check_port_params(mdev, types);
338 if (err)
339 goto out;
340
341 for (i = 1; i <= mdev->caps.num_ports; i++)
342 priv->port[i].tmp_type = 0;
343
344 err = mlx4_change_port_types(mdev, types);
345
346out:
347 mutex_unlock(&priv->port_mutex);
348 return err ? err : count;
349}
350
169static int mlx4_load_fw(struct mlx4_dev *dev) 351static int mlx4_load_fw(struct mlx4_dev *dev)
170{ 352{
171 struct mlx4_priv *priv = mlx4_priv(dev); 353 struct mlx4_priv *priv = mlx4_priv(dev);
@@ -211,7 +393,8 @@ static int mlx4_init_cmpt_table(struct mlx4_dev *dev, u64 cmpt_base,
211 ((u64) (MLX4_CMPT_TYPE_QP * 393 ((u64) (MLX4_CMPT_TYPE_QP *
212 cmpt_entry_sz) << MLX4_CMPT_SHIFT), 394 cmpt_entry_sz) << MLX4_CMPT_SHIFT),
213 cmpt_entry_sz, dev->caps.num_qps, 395 cmpt_entry_sz, dev->caps.num_qps,
214 dev->caps.reserved_qps, 0, 0); 396 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
397 0, 0);
215 if (err) 398 if (err)
216 goto err; 399 goto err;
217 400
@@ -336,7 +519,8 @@ static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap,
336 init_hca->qpc_base, 519 init_hca->qpc_base,
337 dev_cap->qpc_entry_sz, 520 dev_cap->qpc_entry_sz,
338 dev->caps.num_qps, 521 dev->caps.num_qps,
339 dev->caps.reserved_qps, 0, 0); 522 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
523 0, 0);
340 if (err) { 524 if (err) {
341 mlx4_err(dev, "Failed to map QP context memory, aborting.\n"); 525 mlx4_err(dev, "Failed to map QP context memory, aborting.\n");
342 goto err_unmap_dmpt; 526 goto err_unmap_dmpt;
@@ -346,7 +530,8 @@ static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap,
346 init_hca->auxc_base, 530 init_hca->auxc_base,
347 dev_cap->aux_entry_sz, 531 dev_cap->aux_entry_sz,
348 dev->caps.num_qps, 532 dev->caps.num_qps,
349 dev->caps.reserved_qps, 0, 0); 533 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
534 0, 0);
350 if (err) { 535 if (err) {
351 mlx4_err(dev, "Failed to map AUXC context memory, aborting.\n"); 536 mlx4_err(dev, "Failed to map AUXC context memory, aborting.\n");
352 goto err_unmap_qp; 537 goto err_unmap_qp;
@@ -356,7 +541,8 @@ static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap,
356 init_hca->altc_base, 541 init_hca->altc_base,
357 dev_cap->altc_entry_sz, 542 dev_cap->altc_entry_sz,
358 dev->caps.num_qps, 543 dev->caps.num_qps,
359 dev->caps.reserved_qps, 0, 0); 544 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
545 0, 0);
360 if (err) { 546 if (err) {
361 mlx4_err(dev, "Failed to map ALTC context memory, aborting.\n"); 547 mlx4_err(dev, "Failed to map ALTC context memory, aborting.\n");
362 goto err_unmap_auxc; 548 goto err_unmap_auxc;
@@ -366,7 +552,8 @@ static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap,
366 init_hca->rdmarc_base, 552 init_hca->rdmarc_base,
367 dev_cap->rdmarc_entry_sz << priv->qp_table.rdmarc_shift, 553 dev_cap->rdmarc_entry_sz << priv->qp_table.rdmarc_shift,
368 dev->caps.num_qps, 554 dev->caps.num_qps,
369 dev->caps.reserved_qps, 0, 0); 555 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
556 0, 0);
370 if (err) { 557 if (err) {
371 mlx4_err(dev, "Failed to map RDMARC context memory, aborting\n"); 558 mlx4_err(dev, "Failed to map RDMARC context memory, aborting\n");
372 goto err_unmap_altc; 559 goto err_unmap_altc;
@@ -565,6 +752,8 @@ static int mlx4_setup_hca(struct mlx4_dev *dev)
565{ 752{
566 struct mlx4_priv *priv = mlx4_priv(dev); 753 struct mlx4_priv *priv = mlx4_priv(dev);
567 int err; 754 int err;
755 int port;
756 __be32 ib_port_default_caps;
568 757
569 err = mlx4_init_uar_table(dev); 758 err = mlx4_init_uar_table(dev);
570 if (err) { 759 if (err) {
@@ -663,8 +852,27 @@ static int mlx4_setup_hca(struct mlx4_dev *dev)
663 goto err_qp_table_free; 852 goto err_qp_table_free;
664 } 853 }
665 854
855 for (port = 1; port <= dev->caps.num_ports; port++) {
856 ib_port_default_caps = 0;
857 err = mlx4_get_port_ib_caps(dev, port, &ib_port_default_caps);
858 if (err)
859 mlx4_warn(dev, "failed to get port %d default "
860 "ib capabilities (%d). Continuing with "
861 "caps = 0\n", port, err);
862 dev->caps.ib_port_def_cap[port] = ib_port_default_caps;
863 err = mlx4_SET_PORT(dev, port);
864 if (err) {
865 mlx4_err(dev, "Failed to set port %d, aborting\n",
866 port);
867 goto err_mcg_table_free;
868 }
869 }
870
666 return 0; 871 return 0;
667 872
873err_mcg_table_free:
874 mlx4_cleanup_mcg_table(dev);
875
668err_qp_table_free: 876err_qp_table_free:
669 mlx4_cleanup_qp_table(dev); 877 mlx4_cleanup_qp_table(dev);
670 878
@@ -728,11 +936,45 @@ no_msi:
728 priv->eq_table.eq[i].irq = dev->pdev->irq; 936 priv->eq_table.eq[i].irq = dev->pdev->irq;
729} 937}
730 938
939static int mlx4_init_port_info(struct mlx4_dev *dev, int port)
940{
941 struct mlx4_port_info *info = &mlx4_priv(dev)->port[port];
942 int err = 0;
943
944 info->dev = dev;
945 info->port = port;
946 mlx4_init_mac_table(dev, &info->mac_table);
947 mlx4_init_vlan_table(dev, &info->vlan_table);
948
949 sprintf(info->dev_name, "mlx4_port%d", port);
950 info->port_attr.attr.name = info->dev_name;
951 info->port_attr.attr.mode = S_IRUGO | S_IWUSR;
952 info->port_attr.show = show_port_type;
953 info->port_attr.store = set_port_type;
954
955 err = device_create_file(&dev->pdev->dev, &info->port_attr);
956 if (err) {
957 mlx4_err(dev, "Failed to create file for port %d\n", port);
958 info->port = -1;
959 }
960
961 return err;
962}
963
964static void mlx4_cleanup_port_info(struct mlx4_port_info *info)
965{
966 if (info->port < 0)
967 return;
968
969 device_remove_file(&info->dev->pdev->dev, &info->port_attr);
970}
971
731static int __mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id) 972static int __mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
732{ 973{
733 struct mlx4_priv *priv; 974 struct mlx4_priv *priv;
734 struct mlx4_dev *dev; 975 struct mlx4_dev *dev;
735 int err; 976 int err;
977 int port;
736 978
737 printk(KERN_INFO PFX "Initializing %s\n", 979 printk(KERN_INFO PFX "Initializing %s\n",
738 pci_name(pdev)); 980 pci_name(pdev));
@@ -807,6 +1049,8 @@ static int __mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
807 INIT_LIST_HEAD(&priv->ctx_list); 1049 INIT_LIST_HEAD(&priv->ctx_list);
808 spin_lock_init(&priv->ctx_lock); 1050 spin_lock_init(&priv->ctx_lock);
809 1051
1052 mutex_init(&priv->port_mutex);
1053
810 INIT_LIST_HEAD(&priv->pgdir_list); 1054 INIT_LIST_HEAD(&priv->pgdir_list);
811 mutex_init(&priv->pgdir_mutex); 1055 mutex_init(&priv->pgdir_mutex);
812 1056
@@ -842,15 +1086,24 @@ static int __mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
842 if (err) 1086 if (err)
843 goto err_close; 1087 goto err_close;
844 1088
1089 for (port = 1; port <= dev->caps.num_ports; port++) {
1090 err = mlx4_init_port_info(dev, port);
1091 if (err)
1092 goto err_port;
1093 }
1094
845 err = mlx4_register_device(dev); 1095 err = mlx4_register_device(dev);
846 if (err) 1096 if (err)
847 goto err_cleanup; 1097 goto err_port;
848 1098
849 pci_set_drvdata(pdev, dev); 1099 pci_set_drvdata(pdev, dev);
850 1100
851 return 0; 1101 return 0;
852 1102
853err_cleanup: 1103err_port:
1104 for (port = 1; port <= dev->caps.num_ports; port++)
1105 mlx4_cleanup_port_info(&priv->port[port]);
1106
854 mlx4_cleanup_mcg_table(dev); 1107 mlx4_cleanup_mcg_table(dev);
855 mlx4_cleanup_qp_table(dev); 1108 mlx4_cleanup_qp_table(dev);
856 mlx4_cleanup_srq_table(dev); 1109 mlx4_cleanup_srq_table(dev);
@@ -907,8 +1160,10 @@ static void mlx4_remove_one(struct pci_dev *pdev)
907 if (dev) { 1160 if (dev) {
908 mlx4_unregister_device(dev); 1161 mlx4_unregister_device(dev);
909 1162
910 for (p = 1; p <= dev->caps.num_ports; ++p) 1163 for (p = 1; p <= dev->caps.num_ports; p++) {
1164 mlx4_cleanup_port_info(&priv->port[p]);
911 mlx4_CLOSE_PORT(dev, p); 1165 mlx4_CLOSE_PORT(dev, p);
1166 }
912 1167
913 mlx4_cleanup_mcg_table(dev); 1168 mlx4_cleanup_mcg_table(dev);
914 mlx4_cleanup_qp_table(dev); 1169 mlx4_cleanup_qp_table(dev);
@@ -948,6 +1203,8 @@ static struct pci_device_id mlx4_pci_table[] = {
948 { PCI_VDEVICE(MELLANOX, 0x6354) }, /* MT25408 "Hermon" QDR */ 1203 { PCI_VDEVICE(MELLANOX, 0x6354) }, /* MT25408 "Hermon" QDR */
949 { PCI_VDEVICE(MELLANOX, 0x6732) }, /* MT25408 "Hermon" DDR PCIe gen2 */ 1204 { PCI_VDEVICE(MELLANOX, 0x6732) }, /* MT25408 "Hermon" DDR PCIe gen2 */
950 { PCI_VDEVICE(MELLANOX, 0x673c) }, /* MT25408 "Hermon" QDR PCIe gen2 */ 1205 { PCI_VDEVICE(MELLANOX, 0x673c) }, /* MT25408 "Hermon" QDR PCIe gen2 */
1206 { PCI_VDEVICE(MELLANOX, 0x6368) }, /* MT25408 "Hermon" EN 10GigE */
1207 { PCI_VDEVICE(MELLANOX, 0x6750) }, /* MT25408 "Hermon" EN 10GigE PCIe gen2 */
951 { 0, } 1208 { 0, }
952}; 1209};
953 1210
@@ -960,10 +1217,28 @@ static struct pci_driver mlx4_driver = {
960 .remove = __devexit_p(mlx4_remove_one) 1217 .remove = __devexit_p(mlx4_remove_one)
961}; 1218};
962 1219
1220static int __init mlx4_verify_params(void)
1221{
1222 if ((log_num_mac < 0) || (log_num_mac > 7)) {
1223 printk(KERN_WARNING "mlx4_core: bad num_mac: %d\n", log_num_mac);
1224 return -1;
1225 }
1226
1227 if ((log_num_vlan < 0) || (log_num_vlan > 7)) {
1228 printk(KERN_WARNING "mlx4_core: bad num_vlan: %d\n", log_num_vlan);
1229 return -1;
1230 }
1231
1232 return 0;
1233}
1234
963static int __init mlx4_init(void) 1235static int __init mlx4_init(void)
964{ 1236{
965 int ret; 1237 int ret;
966 1238
1239 if (mlx4_verify_params())
1240 return -EINVAL;
1241
967 ret = mlx4_catas_init(); 1242 ret = mlx4_catas_init();
968 if (ret) 1243 if (ret)
969 return ret; 1244 return ret;
diff --git a/drivers/net/mlx4/mcg.c b/drivers/net/mlx4/mcg.c
index c83f88ce0736..592c01ae2c5d 100644
--- a/drivers/net/mlx4/mcg.c
+++ b/drivers/net/mlx4/mcg.c
@@ -368,8 +368,8 @@ int mlx4_init_mcg_table(struct mlx4_dev *dev)
368 struct mlx4_priv *priv = mlx4_priv(dev); 368 struct mlx4_priv *priv = mlx4_priv(dev);
369 int err; 369 int err;
370 370
371 err = mlx4_bitmap_init(&priv->mcg_table.bitmap, 371 err = mlx4_bitmap_init(&priv->mcg_table.bitmap, dev->caps.num_amgms,
372 dev->caps.num_amgms, dev->caps.num_amgms - 1, 0); 372 dev->caps.num_amgms - 1, 0, 0);
373 if (err) 373 if (err)
374 return err; 374 return err;
375 375
diff --git a/drivers/net/mlx4/mlx4.h b/drivers/net/mlx4/mlx4.h
index 5337e3ac3e78..34c909deaff3 100644
--- a/drivers/net/mlx4/mlx4.h
+++ b/drivers/net/mlx4/mlx4.h
@@ -87,6 +87,9 @@ enum {
87 87
88#ifdef CONFIG_MLX4_DEBUG 88#ifdef CONFIG_MLX4_DEBUG
89extern int mlx4_debug_level; 89extern int mlx4_debug_level;
90#else /* CONFIG_MLX4_DEBUG */
91#define mlx4_debug_level (0)
92#endif /* CONFIG_MLX4_DEBUG */
90 93
91#define mlx4_dbg(mdev, format, arg...) \ 94#define mlx4_dbg(mdev, format, arg...) \
92 do { \ 95 do { \
@@ -94,12 +97,6 @@ extern int mlx4_debug_level;
94 dev_printk(KERN_DEBUG, &mdev->pdev->dev, format, ## arg); \ 97 dev_printk(KERN_DEBUG, &mdev->pdev->dev, format, ## arg); \
95 } while (0) 98 } while (0)
96 99
97#else /* CONFIG_MLX4_DEBUG */
98
99#define mlx4_dbg(mdev, format, arg...) do { (void) mdev; } while (0)
100
101#endif /* CONFIG_MLX4_DEBUG */
102
103#define mlx4_err(mdev, format, arg...) \ 100#define mlx4_err(mdev, format, arg...) \
104 dev_err(&mdev->pdev->dev, format, ## arg) 101 dev_err(&mdev->pdev->dev, format, ## arg)
105#define mlx4_info(mdev, format, arg...) \ 102#define mlx4_info(mdev, format, arg...) \
@@ -111,6 +108,7 @@ struct mlx4_bitmap {
111 u32 last; 108 u32 last;
112 u32 top; 109 u32 top;
113 u32 max; 110 u32 max;
111 u32 reserved_top;
114 u32 mask; 112 u32 mask;
115 spinlock_t lock; 113 spinlock_t lock;
116 unsigned long *table; 114 unsigned long *table;
@@ -251,6 +249,38 @@ struct mlx4_catas_err {
251 struct list_head list; 249 struct list_head list;
252}; 250};
253 251
252#define MLX4_MAX_MAC_NUM 128
253#define MLX4_MAC_TABLE_SIZE (MLX4_MAX_MAC_NUM << 3)
254
255struct mlx4_mac_table {
256 __be64 entries[MLX4_MAX_MAC_NUM];
257 int refs[MLX4_MAX_MAC_NUM];
258 struct mutex mutex;
259 int total;
260 int max;
261};
262
263#define MLX4_MAX_VLAN_NUM 128
264#define MLX4_VLAN_TABLE_SIZE (MLX4_MAX_VLAN_NUM << 2)
265
266struct mlx4_vlan_table {
267 __be32 entries[MLX4_MAX_VLAN_NUM];
268 int refs[MLX4_MAX_VLAN_NUM];
269 struct mutex mutex;
270 int total;
271 int max;
272};
273
274struct mlx4_port_info {
275 struct mlx4_dev *dev;
276 int port;
277 char dev_name[16];
278 struct device_attribute port_attr;
279 enum mlx4_port_type tmp_type;
280 struct mlx4_mac_table mac_table;
281 struct mlx4_vlan_table vlan_table;
282};
283
254struct mlx4_priv { 284struct mlx4_priv {
255 struct mlx4_dev dev; 285 struct mlx4_dev dev;
256 286
@@ -279,6 +309,8 @@ struct mlx4_priv {
279 309
280 struct mlx4_uar driver_uar; 310 struct mlx4_uar driver_uar;
281 void __iomem *kar; 311 void __iomem *kar;
312 struct mlx4_port_info port[MLX4_MAX_PORTS + 1];
313 struct mutex port_mutex;
282}; 314};
283 315
284static inline struct mlx4_priv *mlx4_priv(struct mlx4_dev *dev) 316static inline struct mlx4_priv *mlx4_priv(struct mlx4_dev *dev)
@@ -288,7 +320,10 @@ static inline struct mlx4_priv *mlx4_priv(struct mlx4_dev *dev)
288 320
289u32 mlx4_bitmap_alloc(struct mlx4_bitmap *bitmap); 321u32 mlx4_bitmap_alloc(struct mlx4_bitmap *bitmap);
290void mlx4_bitmap_free(struct mlx4_bitmap *bitmap, u32 obj); 322void mlx4_bitmap_free(struct mlx4_bitmap *bitmap, u32 obj);
291int mlx4_bitmap_init(struct mlx4_bitmap *bitmap, u32 num, u32 mask, u32 reserved); 323u32 mlx4_bitmap_alloc_range(struct mlx4_bitmap *bitmap, int cnt, int align);
324void mlx4_bitmap_free_range(struct mlx4_bitmap *bitmap, u32 obj, int cnt);
325int mlx4_bitmap_init(struct mlx4_bitmap *bitmap, u32 num, u32 mask,
326 u32 reserved_bot, u32 resetrved_top);
292void mlx4_bitmap_cleanup(struct mlx4_bitmap *bitmap); 327void mlx4_bitmap_cleanup(struct mlx4_bitmap *bitmap);
293 328
294int mlx4_reset(struct mlx4_dev *dev); 329int mlx4_reset(struct mlx4_dev *dev);
@@ -346,4 +381,10 @@ void mlx4_srq_event(struct mlx4_dev *dev, u32 srqn, int event_type);
346 381
347void mlx4_handle_catas_err(struct mlx4_dev *dev); 382void mlx4_handle_catas_err(struct mlx4_dev *dev);
348 383
384void mlx4_init_mac_table(struct mlx4_dev *dev, struct mlx4_mac_table *table);
385void mlx4_init_vlan_table(struct mlx4_dev *dev, struct mlx4_vlan_table *table);
386
387int mlx4_SET_PORT(struct mlx4_dev *dev, u8 port);
388int mlx4_get_port_ib_caps(struct mlx4_dev *dev, u8 port, __be32 *caps);
389
349#endif /* MLX4_H */ 390#endif /* MLX4_H */
diff --git a/drivers/net/mlx4/mlx4_en.h b/drivers/net/mlx4/mlx4_en.h
new file mode 100644
index 000000000000..98ddc0811f93
--- /dev/null
+++ b/drivers/net/mlx4/mlx4_en.h
@@ -0,0 +1,561 @@
1/*
2 * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 *
32 */
33
34#ifndef _MLX4_EN_H_
35#define _MLX4_EN_H_
36
37#include <linux/compiler.h>
38#include <linux/list.h>
39#include <linux/mutex.h>
40#include <linux/netdevice.h>
41#include <linux/inet_lro.h>
42
43#include <linux/mlx4/device.h>
44#include <linux/mlx4/qp.h>
45#include <linux/mlx4/cq.h>
46#include <linux/mlx4/srq.h>
47#include <linux/mlx4/doorbell.h>
48
49#include "en_port.h"
50
51#define DRV_NAME "mlx4_en"
52#define DRV_VERSION "1.4.0"
53#define DRV_RELDATE "Sep 2008"
54
55
56#define MLX4_EN_MSG_LEVEL (NETIF_MSG_LINK | NETIF_MSG_IFDOWN)
57
58#define mlx4_dbg(mlevel, priv, format, arg...) \
59 if (NETIF_MSG_##mlevel & priv->msg_enable) \
60 printk(KERN_DEBUG "%s %s: " format , DRV_NAME ,\
61 (&priv->mdev->pdev->dev)->bus_id , ## arg)
62
63#define mlx4_err(mdev, format, arg...) \
64 printk(KERN_ERR "%s %s: " format , DRV_NAME ,\
65 (&mdev->pdev->dev)->bus_id , ## arg)
66#define mlx4_info(mdev, format, arg...) \
67 printk(KERN_INFO "%s %s: " format , DRV_NAME ,\
68 (&mdev->pdev->dev)->bus_id , ## arg)
69#define mlx4_warn(mdev, format, arg...) \
70 printk(KERN_WARNING "%s %s: " format , DRV_NAME ,\
71 (&mdev->pdev->dev)->bus_id , ## arg)
72
73/*
74 * Device constants
75 */
76
77
78#define MLX4_EN_PAGE_SHIFT 12
79#define MLX4_EN_PAGE_SIZE (1 << MLX4_EN_PAGE_SHIFT)
80#define MAX_TX_RINGS 16
81#define MAX_RX_RINGS 16
82#define MAX_RSS_MAP_SIZE 64
83#define RSS_FACTOR 2
84#define TXBB_SIZE 64
85#define HEADROOM (2048 / TXBB_SIZE + 1)
86#define MAX_LSO_HDR_SIZE 92
87#define STAMP_STRIDE 64
88#define STAMP_DWORDS (STAMP_STRIDE / 4)
89#define STAMP_SHIFT 31
90#define STAMP_VAL 0x7fffffff
91#define STATS_DELAY (HZ / 4)
92
93/* Typical TSO descriptor with 16 gather entries is 352 bytes... */
94#define MAX_DESC_SIZE 512
95#define MAX_DESC_TXBBS (MAX_DESC_SIZE / TXBB_SIZE)
96
97/*
98 * OS related constants and tunables
99 */
100
101#define MLX4_EN_WATCHDOG_TIMEOUT (15 * HZ)
102
103#define MLX4_EN_ALLOC_ORDER 2
104#define MLX4_EN_ALLOC_SIZE (PAGE_SIZE << MLX4_EN_ALLOC_ORDER)
105
106#define MLX4_EN_MAX_LRO_DESCRIPTORS 32
107
108/* Receive fragment sizes; we use at most 4 fragments (for 9600 byte MTU
109 * and 4K allocations) */
110enum {
111 FRAG_SZ0 = 512 - NET_IP_ALIGN,
112 FRAG_SZ1 = 1024,
113 FRAG_SZ2 = 4096,
114 FRAG_SZ3 = MLX4_EN_ALLOC_SIZE
115};
116#define MLX4_EN_MAX_RX_FRAGS 4
117
118/* Minimum ring size for our page-allocation sceme to work */
119#define MLX4_EN_MIN_RX_SIZE (MLX4_EN_ALLOC_SIZE / SMP_CACHE_BYTES)
120#define MLX4_EN_MIN_TX_SIZE (4096 / TXBB_SIZE)
121
122#define MLX4_EN_TX_RING_NUM 9
123#define MLX4_EN_DEF_TX_RING_SIZE 1024
124#define MLX4_EN_DEF_RX_RING_SIZE 1024
125
126/* Target number of bytes to coalesce with interrupt moderation */
127#define MLX4_EN_RX_COAL_TARGET 0x20000
128#define MLX4_EN_RX_COAL_TIME 0x10
129
130#define MLX4_EN_TX_COAL_PKTS 5
131#define MLX4_EN_TX_COAL_TIME 0x80
132
133#define MLX4_EN_RX_RATE_LOW 400000
134#define MLX4_EN_RX_COAL_TIME_LOW 0
135#define MLX4_EN_RX_RATE_HIGH 450000
136#define MLX4_EN_RX_COAL_TIME_HIGH 128
137#define MLX4_EN_RX_SIZE_THRESH 1024
138#define MLX4_EN_RX_RATE_THRESH (1000000 / MLX4_EN_RX_COAL_TIME_HIGH)
139#define MLX4_EN_SAMPLE_INTERVAL 0
140
141#define MLX4_EN_AUTO_CONF 0xffff
142
143#define MLX4_EN_DEF_RX_PAUSE 1
144#define MLX4_EN_DEF_TX_PAUSE 1
145
146/* Interval between sucessive polls in the Tx routine when polling is used
147 instead of interrupts (in per-core Tx rings) - should be power of 2 */
148#define MLX4_EN_TX_POLL_MODER 16
149#define MLX4_EN_TX_POLL_TIMEOUT (HZ / 4)
150
151#define ETH_LLC_SNAP_SIZE 8
152
153#define SMALL_PACKET_SIZE (256 - NET_IP_ALIGN)
154#define HEADER_COPY_SIZE (128 - NET_IP_ALIGN)
155
156#define MLX4_EN_MIN_MTU 46
157#define ETH_BCAST 0xffffffffffffULL
158
159#ifdef MLX4_EN_PERF_STAT
160/* Number of samples to 'average' */
161#define AVG_SIZE 128
162#define AVG_FACTOR 1024
163#define NUM_PERF_STATS NUM_PERF_COUNTERS
164
165#define INC_PERF_COUNTER(cnt) (++(cnt))
166#define ADD_PERF_COUNTER(cnt, add) ((cnt) += (add))
167#define AVG_PERF_COUNTER(cnt, sample) \
168 ((cnt) = ((cnt) * (AVG_SIZE - 1) + (sample) * AVG_FACTOR) / AVG_SIZE)
169#define GET_PERF_COUNTER(cnt) (cnt)
170#define GET_AVG_PERF_COUNTER(cnt) ((cnt) / AVG_FACTOR)
171
172#else
173
174#define NUM_PERF_STATS 0
175#define INC_PERF_COUNTER(cnt) do {} while (0)
176#define ADD_PERF_COUNTER(cnt, add) do {} while (0)
177#define AVG_PERF_COUNTER(cnt, sample) do {} while (0)
178#define GET_PERF_COUNTER(cnt) (0)
179#define GET_AVG_PERF_COUNTER(cnt) (0)
180#endif /* MLX4_EN_PERF_STAT */
181
182/*
183 * Configurables
184 */
185
186enum cq_type {
187 RX = 0,
188 TX = 1,
189};
190
191
192/*
193 * Useful macros
194 */
195#define ROUNDUP_LOG2(x) ilog2(roundup_pow_of_two(x))
196#define XNOR(x, y) (!(x) == !(y))
197#define ILLEGAL_MAC(addr) (addr == 0xffffffffffffULL || addr == 0x0)
198
199
200struct mlx4_en_tx_info {
201 struct sk_buff *skb;
202 u32 nr_txbb;
203 u8 linear;
204 u8 data_offset;
205};
206
207
208#define MLX4_EN_BIT_DESC_OWN 0x80000000
209#define CTRL_SIZE sizeof(struct mlx4_wqe_ctrl_seg)
210#define MLX4_EN_MEMTYPE_PAD 0x100
211#define DS_SIZE sizeof(struct mlx4_wqe_data_seg)
212
213
214struct mlx4_en_tx_desc {
215 struct mlx4_wqe_ctrl_seg ctrl;
216 union {
217 struct mlx4_wqe_data_seg data; /* at least one data segment */
218 struct mlx4_wqe_lso_seg lso;
219 struct mlx4_wqe_inline_seg inl;
220 };
221};
222
223#define MLX4_EN_USE_SRQ 0x01000000
224
225struct mlx4_en_rx_alloc {
226 struct page *page;
227 u16 offset;
228};
229
230struct mlx4_en_tx_ring {
231 struct mlx4_hwq_resources wqres;
232 u32 size ; /* number of TXBBs */
233 u32 size_mask;
234 u16 stride;
235 u16 cqn; /* index of port CQ associated with this ring */
236 u32 prod;
237 u32 cons;
238 u32 buf_size;
239 u32 doorbell_qpn;
240 void *buf;
241 u16 poll_cnt;
242 int blocked;
243 struct mlx4_en_tx_info *tx_info;
244 u8 *bounce_buf;
245 u32 last_nr_txbb;
246 struct mlx4_qp qp;
247 struct mlx4_qp_context context;
248 int qpn;
249 enum mlx4_qp_state qp_state;
250 struct mlx4_srq dummy;
251 unsigned long bytes;
252 unsigned long packets;
253 spinlock_t comp_lock;
254};
255
256struct mlx4_en_rx_desc {
257 struct mlx4_wqe_srq_next_seg next;
258 /* actual number of entries depends on rx ring stride */
259 struct mlx4_wqe_data_seg data[0];
260};
261
262struct mlx4_en_rx_ring {
263 struct mlx4_srq srq;
264 struct mlx4_hwq_resources wqres;
265 struct mlx4_en_rx_alloc page_alloc[MLX4_EN_MAX_RX_FRAGS];
266 struct net_lro_mgr lro;
267 u32 size ; /* number of Rx descs*/
268 u32 actual_size;
269 u32 size_mask;
270 u16 stride;
271 u16 log_stride;
272 u16 cqn; /* index of port CQ associated with this ring */
273 u32 prod;
274 u32 cons;
275 u32 buf_size;
276 int need_refill;
277 int full;
278 void *buf;
279 void *rx_info;
280 unsigned long bytes;
281 unsigned long packets;
282};
283
284
285static inline int mlx4_en_can_lro(__be16 status)
286{
287 return (status & cpu_to_be16(MLX4_CQE_STATUS_IPV4 |
288 MLX4_CQE_STATUS_IPV4F |
289 MLX4_CQE_STATUS_IPV6 |
290 MLX4_CQE_STATUS_IPV4OPT |
291 MLX4_CQE_STATUS_TCP |
292 MLX4_CQE_STATUS_UDP |
293 MLX4_CQE_STATUS_IPOK)) ==
294 cpu_to_be16(MLX4_CQE_STATUS_IPV4 |
295 MLX4_CQE_STATUS_IPOK |
296 MLX4_CQE_STATUS_TCP);
297}
298
299struct mlx4_en_cq {
300 struct mlx4_cq mcq;
301 struct mlx4_hwq_resources wqres;
302 int ring;
303 spinlock_t lock;
304 struct net_device *dev;
305 struct napi_struct napi;
306 /* Per-core Tx cq processing support */
307 struct timer_list timer;
308 int size;
309 int buf_size;
310 unsigned vector;
311 enum cq_type is_tx;
312 u16 moder_time;
313 u16 moder_cnt;
314 int armed;
315 struct mlx4_cqe *buf;
316#define MLX4_EN_OPCODE_ERROR 0x1e
317};
318
319struct mlx4_en_port_profile {
320 u32 flags;
321 u32 tx_ring_num;
322 u32 rx_ring_num;
323 u32 tx_ring_size;
324 u32 rx_ring_size;
325 u8 rx_pause;
326 u8 rx_ppp;
327 u8 tx_pause;
328 u8 tx_ppp;
329};
330
331struct mlx4_en_profile {
332 int rss_xor;
333 int num_lro;
334 u8 rss_mask;
335 u32 active_ports;
336 u32 small_pkt_int;
337 int rx_moder_cnt;
338 int rx_moder_time;
339 int auto_moder;
340 u8 no_reset;
341 struct mlx4_en_port_profile prof[MLX4_MAX_PORTS + 1];
342};
343
344struct mlx4_en_dev {
345 struct mlx4_dev *dev;
346 struct pci_dev *pdev;
347 struct mutex state_lock;
348 struct net_device *pndev[MLX4_MAX_PORTS + 1];
349 u32 port_cnt;
350 bool device_up;
351 struct mlx4_en_profile profile;
352 u32 LSO_support;
353 struct workqueue_struct *workqueue;
354 struct device *dma_device;
355 void __iomem *uar_map;
356 struct mlx4_uar priv_uar;
357 struct mlx4_mr mr;
358 u32 priv_pdn;
359 spinlock_t uar_lock;
360};
361
362
363struct mlx4_en_rss_map {
364 int size;
365 int base_qpn;
366 u16 map[MAX_RSS_MAP_SIZE];
367 struct mlx4_qp qps[MAX_RSS_MAP_SIZE];
368 enum mlx4_qp_state state[MAX_RSS_MAP_SIZE];
369 struct mlx4_qp indir_qp;
370 enum mlx4_qp_state indir_state;
371};
372
373struct mlx4_en_rss_context {
374 __be32 base_qpn;
375 __be32 default_qpn;
376 u16 reserved;
377 u8 hash_fn;
378 u8 flags;
379 __be32 rss_key[10];
380};
381
382struct mlx4_en_pkt_stats {
383 unsigned long broadcast;
384 unsigned long rx_prio[8];
385 unsigned long tx_prio[8];
386#define NUM_PKT_STATS 17
387};
388
389struct mlx4_en_port_stats {
390 unsigned long lro_aggregated;
391 unsigned long lro_flushed;
392 unsigned long lro_no_desc;
393 unsigned long tso_packets;
394 unsigned long queue_stopped;
395 unsigned long wake_queue;
396 unsigned long tx_timeout;
397 unsigned long rx_alloc_failed;
398 unsigned long rx_chksum_good;
399 unsigned long rx_chksum_none;
400 unsigned long tx_chksum_offload;
401#define NUM_PORT_STATS 11
402};
403
404struct mlx4_en_perf_stats {
405 u32 tx_poll;
406 u64 tx_pktsz_avg;
407 u32 inflight_avg;
408 u16 tx_coal_avg;
409 u16 rx_coal_avg;
410 u32 napi_quota;
411#define NUM_PERF_COUNTERS 6
412};
413
414struct mlx4_en_frag_info {
415 u16 frag_size;
416 u16 frag_prefix_size;
417 u16 frag_stride;
418 u16 frag_align;
419 u16 last_offset;
420
421};
422
423struct mlx4_en_priv {
424 struct mlx4_en_dev *mdev;
425 struct mlx4_en_port_profile *prof;
426 struct net_device *dev;
427 struct vlan_group *vlgrp;
428 struct net_device_stats stats;
429 struct net_device_stats ret_stats;
430 spinlock_t stats_lock;
431
432 unsigned long last_moder_packets;
433 unsigned long last_moder_tx_packets;
434 unsigned long last_moder_bytes;
435 unsigned long last_moder_jiffies;
436 int last_moder_time;
437 u16 rx_usecs;
438 u16 rx_frames;
439 u16 tx_usecs;
440 u16 tx_frames;
441 u32 pkt_rate_low;
442 u16 rx_usecs_low;
443 u32 pkt_rate_high;
444 u16 rx_usecs_high;
445 u16 sample_interval;
446 u16 adaptive_rx_coal;
447 u32 msg_enable;
448
449 struct mlx4_hwq_resources res;
450 int link_state;
451 int last_link_state;
452 bool port_up;
453 int port;
454 int registered;
455 int allocated;
456 int stride;
457 int rx_csum;
458 u64 mac;
459 int mac_index;
460 unsigned max_mtu;
461 int base_qpn;
462
463 struct mlx4_en_rss_map rss_map;
464 u16 tx_prio_map[8];
465 u32 flags;
466#define MLX4_EN_FLAG_PROMISC 0x1
467 u32 tx_ring_num;
468 u32 rx_ring_num;
469 u32 rx_skb_size;
470 struct mlx4_en_frag_info frag_info[MLX4_EN_MAX_RX_FRAGS];
471 u16 num_frags;
472 u16 log_rx_info;
473
474 struct mlx4_en_tx_ring tx_ring[MAX_TX_RINGS];
475 struct mlx4_en_rx_ring rx_ring[MAX_RX_RINGS];
476 struct mlx4_en_cq tx_cq[MAX_TX_RINGS];
477 struct mlx4_en_cq rx_cq[MAX_RX_RINGS];
478 struct work_struct mcast_task;
479 struct work_struct mac_task;
480 struct delayed_work refill_task;
481 struct work_struct watchdog_task;
482 struct work_struct linkstate_task;
483 struct delayed_work stats_task;
484 struct mlx4_en_perf_stats pstats;
485 struct mlx4_en_pkt_stats pkstats;
486 struct mlx4_en_port_stats port_stats;
487 struct dev_mc_list *mc_list;
488 struct mlx4_en_stat_out_mbox hw_stats;
489};
490
491
492void mlx4_en_destroy_netdev(struct net_device *dev);
493int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
494 struct mlx4_en_port_profile *prof);
495
496int mlx4_en_get_profile(struct mlx4_en_dev *mdev);
497
498int mlx4_en_create_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
499 int entries, int ring, enum cq_type mode);
500void mlx4_en_destroy_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq);
501int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq);
502void mlx4_en_deactivate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq);
503int mlx4_en_set_cq_moder(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq);
504int mlx4_en_arm_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq);
505
506void mlx4_en_poll_tx_cq(unsigned long data);
507void mlx4_en_tx_irq(struct mlx4_cq *mcq);
508int mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev);
509
510int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv, struct mlx4_en_tx_ring *ring,
511 u32 size, u16 stride);
512void mlx4_en_destroy_tx_ring(struct mlx4_en_priv *priv, struct mlx4_en_tx_ring *ring);
513int mlx4_en_activate_tx_ring(struct mlx4_en_priv *priv,
514 struct mlx4_en_tx_ring *ring,
515 int cq, int srqn);
516void mlx4_en_deactivate_tx_ring(struct mlx4_en_priv *priv,
517 struct mlx4_en_tx_ring *ring);
518
519int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
520 struct mlx4_en_rx_ring *ring,
521 u32 size, u16 stride);
522void mlx4_en_destroy_rx_ring(struct mlx4_en_priv *priv,
523 struct mlx4_en_rx_ring *ring);
524int mlx4_en_activate_rx_rings(struct mlx4_en_priv *priv);
525void mlx4_en_deactivate_rx_ring(struct mlx4_en_priv *priv,
526 struct mlx4_en_rx_ring *ring);
527int mlx4_en_process_rx_cq(struct net_device *dev,
528 struct mlx4_en_cq *cq,
529 int budget);
530int mlx4_en_poll_rx_cq(struct napi_struct *napi, int budget);
531void mlx4_en_fill_qp_context(struct mlx4_en_priv *priv, int size, int stride,
532 int is_tx, int rss, int qpn, int cqn, int srqn,
533 struct mlx4_qp_context *context);
534int mlx4_en_map_buffer(struct mlx4_buf *buf);
535void mlx4_en_unmap_buffer(struct mlx4_buf *buf);
536
537void mlx4_en_calc_rx_buf(struct net_device *dev);
538void mlx4_en_set_default_rss_map(struct mlx4_en_priv *priv,
539 struct mlx4_en_rss_map *rss_map,
540 int num_entries, int num_rings);
541void mlx4_en_set_prio_map(struct mlx4_en_priv *priv, u16 *prio_map, u32 ring_num);
542int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv);
543void mlx4_en_release_rss_steer(struct mlx4_en_priv *priv);
544int mlx4_en_free_tx_buf(struct net_device *dev, struct mlx4_en_tx_ring *ring);
545void mlx4_en_rx_refill(struct work_struct *work);
546void mlx4_en_rx_irq(struct mlx4_cq *mcq);
547
548int mlx4_SET_MCAST_FLTR(struct mlx4_dev *dev, u8 port, u64 mac, u64 clear, u8 mode);
549int mlx4_SET_VLAN_FLTR(struct mlx4_dev *dev, u8 port, struct vlan_group *grp);
550int mlx4_SET_PORT_general(struct mlx4_dev *dev, u8 port, int mtu,
551 u8 pptx, u8 pfctx, u8 pprx, u8 pfcrx);
552int mlx4_SET_PORT_qpn_calc(struct mlx4_dev *dev, u8 port, u32 base_qpn,
553 u8 promisc);
554
555int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset);
556
557/*
558 * Globals
559 */
560extern const struct ethtool_ops mlx4_en_ethtool_ops;
561#endif
diff --git a/drivers/net/mlx4/mr.c b/drivers/net/mlx4/mr.c
index d1dd5b48dbd1..0caf74cae8bc 100644
--- a/drivers/net/mlx4/mr.c
+++ b/drivers/net/mlx4/mr.c
@@ -461,7 +461,7 @@ int mlx4_init_mr_table(struct mlx4_dev *dev)
461 int err; 461 int err;
462 462
463 err = mlx4_bitmap_init(&mr_table->mpt_bitmap, dev->caps.num_mpts, 463 err = mlx4_bitmap_init(&mr_table->mpt_bitmap, dev->caps.num_mpts,
464 ~0, dev->caps.reserved_mrws); 464 ~0, dev->caps.reserved_mrws, 0);
465 if (err) 465 if (err)
466 return err; 466 return err;
467 467
diff --git a/drivers/net/mlx4/pd.c b/drivers/net/mlx4/pd.c
index aa616892d09c..26d1a7a9e375 100644
--- a/drivers/net/mlx4/pd.c
+++ b/drivers/net/mlx4/pd.c
@@ -62,7 +62,7 @@ int mlx4_init_pd_table(struct mlx4_dev *dev)
62 struct mlx4_priv *priv = mlx4_priv(dev); 62 struct mlx4_priv *priv = mlx4_priv(dev);
63 63
64 return mlx4_bitmap_init(&priv->pd_bitmap, dev->caps.num_pds, 64 return mlx4_bitmap_init(&priv->pd_bitmap, dev->caps.num_pds,
65 (1 << 24) - 1, dev->caps.reserved_pds); 65 (1 << 24) - 1, dev->caps.reserved_pds, 0);
66} 66}
67 67
68void mlx4_cleanup_pd_table(struct mlx4_dev *dev) 68void mlx4_cleanup_pd_table(struct mlx4_dev *dev)
@@ -100,7 +100,7 @@ int mlx4_init_uar_table(struct mlx4_dev *dev)
100 100
101 return mlx4_bitmap_init(&mlx4_priv(dev)->uar_table.bitmap, 101 return mlx4_bitmap_init(&mlx4_priv(dev)->uar_table.bitmap,
102 dev->caps.num_uars, dev->caps.num_uars - 1, 102 dev->caps.num_uars, dev->caps.num_uars - 1,
103 max(128, dev->caps.reserved_uars)); 103 max(128, dev->caps.reserved_uars), 0);
104} 104}
105 105
106void mlx4_cleanup_uar_table(struct mlx4_dev *dev) 106void mlx4_cleanup_uar_table(struct mlx4_dev *dev)
diff --git a/drivers/net/mlx4/port.c b/drivers/net/mlx4/port.c
new file mode 100644
index 000000000000..0a057e5dc63b
--- /dev/null
+++ b/drivers/net/mlx4/port.c
@@ -0,0 +1,319 @@
1/*
2 * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33#include <linux/errno.h>
34#include <linux/if_ether.h>
35
36#include <linux/mlx4/cmd.h>
37
38#include "mlx4.h"
39
40#define MLX4_MAC_VALID (1ull << 63)
41#define MLX4_MAC_MASK 0xffffffffffffULL
42
43#define MLX4_VLAN_VALID (1u << 31)
44#define MLX4_VLAN_MASK 0xfff
45
46void mlx4_init_mac_table(struct mlx4_dev *dev, struct mlx4_mac_table *table)
47{
48 int i;
49
50 mutex_init(&table->mutex);
51 for (i = 0; i < MLX4_MAX_MAC_NUM; i++) {
52 table->entries[i] = 0;
53 table->refs[i] = 0;
54 }
55 table->max = 1 << dev->caps.log_num_macs;
56 table->total = 0;
57}
58
59void mlx4_init_vlan_table(struct mlx4_dev *dev, struct mlx4_vlan_table *table)
60{
61 int i;
62
63 mutex_init(&table->mutex);
64 for (i = 0; i < MLX4_MAX_VLAN_NUM; i++) {
65 table->entries[i] = 0;
66 table->refs[i] = 0;
67 }
68 table->max = 1 << dev->caps.log_num_vlans;
69 table->total = 0;
70}
71
72static int mlx4_set_port_mac_table(struct mlx4_dev *dev, u8 port,
73 __be64 *entries)
74{
75 struct mlx4_cmd_mailbox *mailbox;
76 u32 in_mod;
77 int err;
78
79 mailbox = mlx4_alloc_cmd_mailbox(dev);
80 if (IS_ERR(mailbox))
81 return PTR_ERR(mailbox);
82
83 memcpy(mailbox->buf, entries, MLX4_MAC_TABLE_SIZE);
84
85 in_mod = MLX4_SET_PORT_MAC_TABLE << 8 | port;
86 err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT,
87 MLX4_CMD_TIME_CLASS_B);
88
89 mlx4_free_cmd_mailbox(dev, mailbox);
90 return err;
91}
92
93int mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac, int *index)
94{
95 struct mlx4_mac_table *table = &mlx4_priv(dev)->port[port].mac_table;
96 int i, err = 0;
97 int free = -1;
98
99 mlx4_dbg(dev, "Registering MAC: 0x%llx\n", (unsigned long long) mac);
100 mutex_lock(&table->mutex);
101 for (i = 0; i < MLX4_MAX_MAC_NUM - 1; i++) {
102 if (free < 0 && !table->refs[i]) {
103 free = i;
104 continue;
105 }
106
107 if (mac == (MLX4_MAC_MASK & be64_to_cpu(table->entries[i]))) {
108 /* MAC already registered, increase refernce count */
109 *index = i;
110 ++table->refs[i];
111 goto out;
112 }
113 }
114 mlx4_dbg(dev, "Free MAC index is %d\n", free);
115
116 if (table->total == table->max) {
117 /* No free mac entries */
118 err = -ENOSPC;
119 goto out;
120 }
121
122 /* Register new MAC */
123 table->refs[free] = 1;
124 table->entries[free] = cpu_to_be64(mac | MLX4_MAC_VALID);
125
126 err = mlx4_set_port_mac_table(dev, port, table->entries);
127 if (unlikely(err)) {
128 mlx4_err(dev, "Failed adding MAC: 0x%llx\n", (unsigned long long) mac);
129 table->refs[free] = 0;
130 table->entries[free] = 0;
131 goto out;
132 }
133
134 *index = free;
135 ++table->total;
136out:
137 mutex_unlock(&table->mutex);
138 return err;
139}
140EXPORT_SYMBOL_GPL(mlx4_register_mac);
141
142void mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, int index)
143{
144 struct mlx4_mac_table *table = &mlx4_priv(dev)->port[port].mac_table;
145
146 mutex_lock(&table->mutex);
147 if (!table->refs[index]) {
148 mlx4_warn(dev, "No MAC entry for index %d\n", index);
149 goto out;
150 }
151 if (--table->refs[index]) {
152 mlx4_warn(dev, "Have more references for index %d,"
153 "no need to modify MAC table\n", index);
154 goto out;
155 }
156 table->entries[index] = 0;
157 mlx4_set_port_mac_table(dev, port, table->entries);
158 --table->total;
159out:
160 mutex_unlock(&table->mutex);
161}
162EXPORT_SYMBOL_GPL(mlx4_unregister_mac);
163
164static int mlx4_set_port_vlan_table(struct mlx4_dev *dev, u8 port,
165 __be32 *entries)
166{
167 struct mlx4_cmd_mailbox *mailbox;
168 u32 in_mod;
169 int err;
170
171 mailbox = mlx4_alloc_cmd_mailbox(dev);
172 if (IS_ERR(mailbox))
173 return PTR_ERR(mailbox);
174
175 memcpy(mailbox->buf, entries, MLX4_VLAN_TABLE_SIZE);
176 in_mod = MLX4_SET_PORT_VLAN_TABLE << 8 | port;
177 err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT,
178 MLX4_CMD_TIME_CLASS_B);
179
180 mlx4_free_cmd_mailbox(dev, mailbox);
181
182 return err;
183}
184
185int mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index)
186{
187 struct mlx4_vlan_table *table = &mlx4_priv(dev)->port[port].vlan_table;
188 int i, err = 0;
189 int free = -1;
190
191 mutex_lock(&table->mutex);
192 for (i = MLX4_VLAN_REGULAR; i < MLX4_MAX_VLAN_NUM; i++) {
193 if (free < 0 && (table->refs[i] == 0)) {
194 free = i;
195 continue;
196 }
197
198 if (table->refs[i] &&
199 (vlan == (MLX4_VLAN_MASK &
200 be32_to_cpu(table->entries[i])))) {
201 /* Vlan already registered, increase refernce count */
202 *index = i;
203 ++table->refs[i];
204 goto out;
205 }
206 }
207
208 if (table->total == table->max) {
209 /* No free vlan entries */
210 err = -ENOSPC;
211 goto out;
212 }
213
214 /* Register new MAC */
215 table->refs[free] = 1;
216 table->entries[free] = cpu_to_be32(vlan | MLX4_VLAN_VALID);
217
218 err = mlx4_set_port_vlan_table(dev, port, table->entries);
219 if (unlikely(err)) {
220 mlx4_warn(dev, "Failed adding vlan: %u\n", vlan);
221 table->refs[free] = 0;
222 table->entries[free] = 0;
223 goto out;
224 }
225
226 *index = free;
227 ++table->total;
228out:
229 mutex_unlock(&table->mutex);
230 return err;
231}
232EXPORT_SYMBOL_GPL(mlx4_register_vlan);
233
234void mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, int index)
235{
236 struct mlx4_vlan_table *table = &mlx4_priv(dev)->port[port].vlan_table;
237
238 if (index < MLX4_VLAN_REGULAR) {
239 mlx4_warn(dev, "Trying to free special vlan index %d\n", index);
240 return;
241 }
242
243 mutex_lock(&table->mutex);
244 if (!table->refs[index]) {
245 mlx4_warn(dev, "No vlan entry for index %d\n", index);
246 goto out;
247 }
248 if (--table->refs[index]) {
249 mlx4_dbg(dev, "Have more references for index %d,"
250 "no need to modify vlan table\n", index);
251 goto out;
252 }
253 table->entries[index] = 0;
254 mlx4_set_port_vlan_table(dev, port, table->entries);
255 --table->total;
256out:
257 mutex_unlock(&table->mutex);
258}
259EXPORT_SYMBOL_GPL(mlx4_unregister_vlan);
260
261int mlx4_get_port_ib_caps(struct mlx4_dev *dev, u8 port, __be32 *caps)
262{
263 struct mlx4_cmd_mailbox *inmailbox, *outmailbox;
264 u8 *inbuf, *outbuf;
265 int err;
266
267 inmailbox = mlx4_alloc_cmd_mailbox(dev);
268 if (IS_ERR(inmailbox))
269 return PTR_ERR(inmailbox);
270
271 outmailbox = mlx4_alloc_cmd_mailbox(dev);
272 if (IS_ERR(outmailbox)) {
273 mlx4_free_cmd_mailbox(dev, inmailbox);
274 return PTR_ERR(outmailbox);
275 }
276
277 inbuf = inmailbox->buf;
278 outbuf = outmailbox->buf;
279 memset(inbuf, 0, 256);
280 memset(outbuf, 0, 256);
281 inbuf[0] = 1;
282 inbuf[1] = 1;
283 inbuf[2] = 1;
284 inbuf[3] = 1;
285 *(__be16 *) (&inbuf[16]) = cpu_to_be16(0x0015);
286 *(__be32 *) (&inbuf[20]) = cpu_to_be32(port);
287
288 err = mlx4_cmd_box(dev, inmailbox->dma, outmailbox->dma, port, 3,
289 MLX4_CMD_MAD_IFC, MLX4_CMD_TIME_CLASS_C);
290 if (!err)
291 *caps = *(__be32 *) (outbuf + 84);
292 mlx4_free_cmd_mailbox(dev, inmailbox);
293 mlx4_free_cmd_mailbox(dev, outmailbox);
294 return err;
295}
296
297int mlx4_SET_PORT(struct mlx4_dev *dev, u8 port)
298{
299 struct mlx4_cmd_mailbox *mailbox;
300 int err;
301 u8 is_eth = dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH;
302
303 mailbox = mlx4_alloc_cmd_mailbox(dev);
304 if (IS_ERR(mailbox))
305 return PTR_ERR(mailbox);
306
307 memset(mailbox->buf, 0, 256);
308 if (is_eth) {
309 ((u8 *) mailbox->buf)[3] = 6;
310 ((__be16 *) mailbox->buf)[4] = cpu_to_be16(1 << 15);
311 ((__be16 *) mailbox->buf)[6] = cpu_to_be16(1 << 15);
312 } else
313 ((__be32 *) mailbox->buf)[1] = dev->caps.ib_port_def_cap[port];
314 err = mlx4_cmd(dev, mailbox->dma, port, is_eth, MLX4_CMD_SET_PORT,
315 MLX4_CMD_TIME_CLASS_B);
316
317 mlx4_free_cmd_mailbox(dev, mailbox);
318 return err;
319}
diff --git a/drivers/net/mlx4/qp.c b/drivers/net/mlx4/qp.c
index c49a86044bf7..1c565ef8d179 100644
--- a/drivers/net/mlx4/qp.c
+++ b/drivers/net/mlx4/qp.c
@@ -147,19 +147,42 @@ int mlx4_qp_modify(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
147} 147}
148EXPORT_SYMBOL_GPL(mlx4_qp_modify); 148EXPORT_SYMBOL_GPL(mlx4_qp_modify);
149 149
150int mlx4_qp_alloc(struct mlx4_dev *dev, int sqpn, struct mlx4_qp *qp) 150int mlx4_qp_reserve_range(struct mlx4_dev *dev, int cnt, int align, int *base)
151{
152 struct mlx4_priv *priv = mlx4_priv(dev);
153 struct mlx4_qp_table *qp_table = &priv->qp_table;
154 int qpn;
155
156 qpn = mlx4_bitmap_alloc_range(&qp_table->bitmap, cnt, align);
157 if (qpn == -1)
158 return -ENOMEM;
159
160 *base = qpn;
161 return 0;
162}
163EXPORT_SYMBOL_GPL(mlx4_qp_reserve_range);
164
165void mlx4_qp_release_range(struct mlx4_dev *dev, int base_qpn, int cnt)
166{
167 struct mlx4_priv *priv = mlx4_priv(dev);
168 struct mlx4_qp_table *qp_table = &priv->qp_table;
169 if (base_qpn < dev->caps.sqp_start + 8)
170 return;
171
172 mlx4_bitmap_free_range(&qp_table->bitmap, base_qpn, cnt);
173}
174EXPORT_SYMBOL_GPL(mlx4_qp_release_range);
175
176int mlx4_qp_alloc(struct mlx4_dev *dev, int qpn, struct mlx4_qp *qp)
151{ 177{
152 struct mlx4_priv *priv = mlx4_priv(dev); 178 struct mlx4_priv *priv = mlx4_priv(dev);
153 struct mlx4_qp_table *qp_table = &priv->qp_table; 179 struct mlx4_qp_table *qp_table = &priv->qp_table;
154 int err; 180 int err;
155 181
156 if (sqpn) 182 if (!qpn)
157 qp->qpn = sqpn; 183 return -EINVAL;
158 else { 184
159 qp->qpn = mlx4_bitmap_alloc(&qp_table->bitmap); 185 qp->qpn = qpn;
160 if (qp->qpn == -1)
161 return -ENOMEM;
162 }
163 186
164 err = mlx4_table_get(dev, &qp_table->qp_table, qp->qpn); 187 err = mlx4_table_get(dev, &qp_table->qp_table, qp->qpn);
165 if (err) 188 if (err)
@@ -208,9 +231,6 @@ err_put_qp:
208 mlx4_table_put(dev, &qp_table->qp_table, qp->qpn); 231 mlx4_table_put(dev, &qp_table->qp_table, qp->qpn);
209 232
210err_out: 233err_out:
211 if (!sqpn)
212 mlx4_bitmap_free(&qp_table->bitmap, qp->qpn);
213
214 return err; 234 return err;
215} 235}
216EXPORT_SYMBOL_GPL(mlx4_qp_alloc); 236EXPORT_SYMBOL_GPL(mlx4_qp_alloc);
@@ -239,9 +259,6 @@ void mlx4_qp_free(struct mlx4_dev *dev, struct mlx4_qp *qp)
239 mlx4_table_put(dev, &qp_table->altc_table, qp->qpn); 259 mlx4_table_put(dev, &qp_table->altc_table, qp->qpn);
240 mlx4_table_put(dev, &qp_table->auxc_table, qp->qpn); 260 mlx4_table_put(dev, &qp_table->auxc_table, qp->qpn);
241 mlx4_table_put(dev, &qp_table->qp_table, qp->qpn); 261 mlx4_table_put(dev, &qp_table->qp_table, qp->qpn);
242
243 if (qp->qpn >= dev->caps.sqp_start + 8)
244 mlx4_bitmap_free(&qp_table->bitmap, qp->qpn);
245} 262}
246EXPORT_SYMBOL_GPL(mlx4_qp_free); 263EXPORT_SYMBOL_GPL(mlx4_qp_free);
247 264
@@ -255,6 +272,7 @@ int mlx4_init_qp_table(struct mlx4_dev *dev)
255{ 272{
256 struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table; 273 struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table;
257 int err; 274 int err;
275 int reserved_from_top = 0;
258 276
259 spin_lock_init(&qp_table->lock); 277 spin_lock_init(&qp_table->lock);
260 INIT_RADIX_TREE(&dev->qp_table_tree, GFP_ATOMIC); 278 INIT_RADIX_TREE(&dev->qp_table_tree, GFP_ATOMIC);
@@ -264,9 +282,40 @@ int mlx4_init_qp_table(struct mlx4_dev *dev)
264 * block of special QPs must be aligned to a multiple of 8, so 282 * block of special QPs must be aligned to a multiple of 8, so
265 * round up. 283 * round up.
266 */ 284 */
267 dev->caps.sqp_start = ALIGN(dev->caps.reserved_qps, 8); 285 dev->caps.sqp_start =
286 ALIGN(dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW], 8);
287
288 {
289 int sort[MLX4_NUM_QP_REGION];
290 int i, j, tmp;
291 int last_base = dev->caps.num_qps;
292
293 for (i = 1; i < MLX4_NUM_QP_REGION; ++i)
294 sort[i] = i;
295
296 for (i = MLX4_NUM_QP_REGION; i > 0; --i) {
297 for (j = 2; j < i; ++j) {
298 if (dev->caps.reserved_qps_cnt[sort[j]] >
299 dev->caps.reserved_qps_cnt[sort[j - 1]]) {
300 tmp = sort[j];
301 sort[j] = sort[j - 1];
302 sort[j - 1] = tmp;
303 }
304 }
305 }
306
307 for (i = 1; i < MLX4_NUM_QP_REGION; ++i) {
308 last_base -= dev->caps.reserved_qps_cnt[sort[i]];
309 dev->caps.reserved_qps_base[sort[i]] = last_base;
310 reserved_from_top +=
311 dev->caps.reserved_qps_cnt[sort[i]];
312 }
313
314 }
315
268 err = mlx4_bitmap_init(&qp_table->bitmap, dev->caps.num_qps, 316 err = mlx4_bitmap_init(&qp_table->bitmap, dev->caps.num_qps,
269 (1 << 24) - 1, dev->caps.sqp_start + 8); 317 (1 << 23) - 1, dev->caps.sqp_start + 8,
318 reserved_from_top);
270 if (err) 319 if (err)
271 return err; 320 return err;
272 321
diff --git a/drivers/net/mlx4/srq.c b/drivers/net/mlx4/srq.c
index 533eb6db24b3..fe9f218691f5 100644
--- a/drivers/net/mlx4/srq.c
+++ b/drivers/net/mlx4/srq.c
@@ -245,7 +245,7 @@ int mlx4_init_srq_table(struct mlx4_dev *dev)
245 INIT_RADIX_TREE(&srq_table->tree, GFP_ATOMIC); 245 INIT_RADIX_TREE(&srq_table->tree, GFP_ATOMIC);
246 246
247 err = mlx4_bitmap_init(&srq_table->bitmap, dev->caps.num_srqs, 247 err = mlx4_bitmap_init(&srq_table->bitmap, dev->caps.num_srqs,
248 dev->caps.num_srqs - 1, dev->caps.reserved_srqs); 248 dev->caps.num_srqs - 1, dev->caps.reserved_srqs, 0);
249 if (err) 249 if (err)
250 return err; 250 return err;
251 251
diff --git a/drivers/net/mv643xx_eth.c b/drivers/net/mv643xx_eth.c
index a9c8c08044b1..e513f76f2a9f 100644
--- a/drivers/net/mv643xx_eth.c
+++ b/drivers/net/mv643xx_eth.c
@@ -899,7 +899,8 @@ static int txq_reclaim(struct tx_queue *txq, int budget, int force)
899 if (skb != NULL) { 899 if (skb != NULL) {
900 if (skb_queue_len(&mp->rx_recycle) < 900 if (skb_queue_len(&mp->rx_recycle) <
901 mp->default_rx_ring_size && 901 mp->default_rx_ring_size &&
902 skb_recycle_check(skb, mp->skb_size)) 902 skb_recycle_check(skb, mp->skb_size +
903 dma_get_cache_alignment() - 1))
903 __skb_queue_head(&mp->rx_recycle, skb); 904 __skb_queue_head(&mp->rx_recycle, skb);
904 else 905 else
905 dev_kfree_skb(skb); 906 dev_kfree_skb(skb);
@@ -1066,9 +1067,12 @@ static int smi_wait_ready(struct mv643xx_eth_shared_private *msp)
1066 return 0; 1067 return 0;
1067 } 1068 }
1068 1069
1069 if (!wait_event_timeout(msp->smi_busy_wait, smi_is_done(msp), 1070 if (!smi_is_done(msp)) {
1070 msecs_to_jiffies(100))) 1071 wait_event_timeout(msp->smi_busy_wait, smi_is_done(msp),
1071 return -ETIMEDOUT; 1072 msecs_to_jiffies(100));
1073 if (!smi_is_done(msp))
1074 return -ETIMEDOUT;
1075 }
1072 1076
1073 return 0; 1077 return 0;
1074} 1078}
@@ -2432,8 +2436,8 @@ static int mv643xx_eth_shared_remove(struct platform_device *pdev)
2432 struct mv643xx_eth_shared_platform_data *pd = pdev->dev.platform_data; 2436 struct mv643xx_eth_shared_platform_data *pd = pdev->dev.platform_data;
2433 2437
2434 if (pd == NULL || pd->shared_smi == NULL) { 2438 if (pd == NULL || pd->shared_smi == NULL) {
2435 mdiobus_free(msp->smi_bus);
2436 mdiobus_unregister(msp->smi_bus); 2439 mdiobus_unregister(msp->smi_bus);
2440 mdiobus_free(msp->smi_bus);
2437 } 2441 }
2438 if (msp->err_interrupt != NO_IRQ) 2442 if (msp->err_interrupt != NO_IRQ)
2439 free_irq(msp->err_interrupt, msp); 2443 free_irq(msp->err_interrupt, msp);
diff --git a/drivers/net/myri10ge/myri10ge.c b/drivers/net/myri10ge/myri10ge.c
index a9aebad52652..b37867097308 100644
--- a/drivers/net/myri10ge/myri10ge.c
+++ b/drivers/net/myri10ge/myri10ge.c
@@ -75,7 +75,7 @@
75#include "myri10ge_mcp.h" 75#include "myri10ge_mcp.h"
76#include "myri10ge_mcp_gen_header.h" 76#include "myri10ge_mcp_gen_header.h"
77 77
78#define MYRI10GE_VERSION_STR "1.4.3-1.369" 78#define MYRI10GE_VERSION_STR "1.4.3-1.378"
79 79
80MODULE_DESCRIPTION("Myricom 10G driver (10GbE)"); 80MODULE_DESCRIPTION("Myricom 10G driver (10GbE)");
81MODULE_AUTHOR("Maintainer: help@myri.com"); 81MODULE_AUTHOR("Maintainer: help@myri.com");
@@ -1393,6 +1393,8 @@ myri10ge_tx_done(struct myri10ge_slice_state *ss, int mcp_index)
1393 if (tx->req == tx->done) { 1393 if (tx->req == tx->done) {
1394 tx->queue_active = 0; 1394 tx->queue_active = 0;
1395 put_be32(htonl(1), tx->send_stop); 1395 put_be32(htonl(1), tx->send_stop);
1396 mb();
1397 mmiowb();
1396 } 1398 }
1397 __netif_tx_unlock(dev_queue); 1399 __netif_tx_unlock(dev_queue);
1398 } 1400 }
@@ -2497,6 +2499,10 @@ static int myri10ge_open(struct net_device *dev)
2497 return 0; 2499 return 0;
2498 2500
2499abort_with_rings: 2501abort_with_rings:
2502 while (slice) {
2503 slice--;
2504 napi_disable(&mgp->ss[slice].napi);
2505 }
2500 for (i = 0; i < mgp->num_slices; i++) 2506 for (i = 0; i < mgp->num_slices; i++)
2501 myri10ge_free_rings(&mgp->ss[i]); 2507 myri10ge_free_rings(&mgp->ss[i]);
2502 2508
@@ -2860,6 +2866,8 @@ again:
2860 if ((mgp->dev->real_num_tx_queues > 1) && tx->queue_active == 0) { 2866 if ((mgp->dev->real_num_tx_queues > 1) && tx->queue_active == 0) {
2861 tx->queue_active = 1; 2867 tx->queue_active = 1;
2862 put_be32(htonl(1), tx->send_go); 2868 put_be32(htonl(1), tx->send_go);
2869 mb();
2870 mmiowb();
2863 } 2871 }
2864 tx->pkt_start++; 2872 tx->pkt_start++;
2865 if ((avail - count) < MXGEFW_MAX_SEND_DESC) { 2873 if ((avail - count) < MXGEFW_MAX_SEND_DESC) {
diff --git a/drivers/net/netx-eth.c b/drivers/net/netx-eth.c
index b9bed82e1d21..b289a0a2b945 100644
--- a/drivers/net/netx-eth.c
+++ b/drivers/net/netx-eth.c
@@ -401,6 +401,8 @@ static int netx_eth_drv_probe(struct platform_device *pdev)
401 priv->xmac_base = priv->xc->xmac_base; 401 priv->xmac_base = priv->xc->xmac_base;
402 priv->sram_base = priv->xc->sram_base; 402 priv->sram_base = priv->xc->sram_base;
403 403
404 spin_lock_init(&priv->lock);
405
404 ret = pfifo_request(PFIFO_MASK(priv->id)); 406 ret = pfifo_request(PFIFO_MASK(priv->id));
405 if (ret) { 407 if (ret) {
406 printk("unable to request PFIFO\n"); 408 printk("unable to request PFIFO\n");
diff --git a/drivers/net/niu.c b/drivers/net/niu.c
index ebc812702903..1b6f548c4411 100644
--- a/drivers/net/niu.c
+++ b/drivers/net/niu.c
@@ -33,8 +33,8 @@
33 33
34#define DRV_MODULE_NAME "niu" 34#define DRV_MODULE_NAME "niu"
35#define PFX DRV_MODULE_NAME ": " 35#define PFX DRV_MODULE_NAME ": "
36#define DRV_MODULE_VERSION "0.9" 36#define DRV_MODULE_VERSION "1.0"
37#define DRV_MODULE_RELDATE "May 4, 2008" 37#define DRV_MODULE_RELDATE "Nov 14, 2008"
38 38
39static char version[] __devinitdata = 39static char version[] __devinitdata =
40 DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n"; 40 DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
@@ -51,8 +51,7 @@ MODULE_VERSION(DRV_MODULE_VERSION);
51#ifndef readq 51#ifndef readq
52static u64 readq(void __iomem *reg) 52static u64 readq(void __iomem *reg)
53{ 53{
54 return (((u64)readl(reg + 0x4UL) << 32) | 54 return ((u64) readl(reg)) | (((u64) readl(reg + 4UL)) << 32);
55 (u64)readl(reg));
56} 55}
57 56
58static void writeq(u64 val, void __iomem *reg) 57static void writeq(u64 val, void __iomem *reg)
@@ -407,7 +406,7 @@ static int esr2_set_rx_cfg(struct niu *np, unsigned long channel, u32 val)
407} 406}
408 407
409/* Mode is always 10G fiber. */ 408/* Mode is always 10G fiber. */
410static int serdes_init_niu(struct niu *np) 409static int serdes_init_niu_10g_fiber(struct niu *np)
411{ 410{
412 struct niu_link_config *lp = &np->link_config; 411 struct niu_link_config *lp = &np->link_config;
413 u32 tx_cfg, rx_cfg; 412 u32 tx_cfg, rx_cfg;
@@ -444,6 +443,223 @@ static int serdes_init_niu(struct niu *np)
444 return 0; 443 return 0;
445} 444}
446 445
446static int serdes_init_niu_1g_serdes(struct niu *np)
447{
448 struct niu_link_config *lp = &np->link_config;
449 u16 pll_cfg, pll_sts;
450 int max_retry = 100;
451 u64 sig, mask, val;
452 u32 tx_cfg, rx_cfg;
453 unsigned long i;
454 int err;
455
456 tx_cfg = (PLL_TX_CFG_ENTX | PLL_TX_CFG_SWING_1375MV |
457 PLL_TX_CFG_RATE_HALF);
458 rx_cfg = (PLL_RX_CFG_ENRX | PLL_RX_CFG_TERM_0P8VDDT |
459 PLL_RX_CFG_ALIGN_ENA | PLL_RX_CFG_LOS_LTHRESH |
460 PLL_RX_CFG_RATE_HALF);
461
462 if (np->port == 0)
463 rx_cfg |= PLL_RX_CFG_EQ_LP_ADAPTIVE;
464
465 if (lp->loopback_mode == LOOPBACK_PHY) {
466 u16 test_cfg = PLL_TEST_CFG_LOOPBACK_CML_DIS;
467
468 mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
469 ESR2_TI_PLL_TEST_CFG_L, test_cfg);
470
471 tx_cfg |= PLL_TX_CFG_ENTEST;
472 rx_cfg |= PLL_RX_CFG_ENTEST;
473 }
474
475 /* Initialize PLL for 1G */
476 pll_cfg = (PLL_CFG_ENPLL | PLL_CFG_MPY_8X);
477
478 err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
479 ESR2_TI_PLL_CFG_L, pll_cfg);
480 if (err) {
481 dev_err(np->device, PFX "NIU Port %d "
482 "serdes_init_niu_1g_serdes: "
483 "mdio write to ESR2_TI_PLL_CFG_L failed", np->port);
484 return err;
485 }
486
487 pll_sts = PLL_CFG_ENPLL;
488
489 err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
490 ESR2_TI_PLL_STS_L, pll_sts);
491 if (err) {
492 dev_err(np->device, PFX "NIU Port %d "
493 "serdes_init_niu_1g_serdes: "
494 "mdio write to ESR2_TI_PLL_STS_L failed", np->port);
495 return err;
496 }
497
498 udelay(200);
499
500 /* Initialize all 4 lanes of the SERDES. */
501 for (i = 0; i < 4; i++) {
502 err = esr2_set_tx_cfg(np, i, tx_cfg);
503 if (err)
504 return err;
505 }
506
507 for (i = 0; i < 4; i++) {
508 err = esr2_set_rx_cfg(np, i, rx_cfg);
509 if (err)
510 return err;
511 }
512
513 switch (np->port) {
514 case 0:
515 val = (ESR_INT_SRDY0_P0 | ESR_INT_DET0_P0);
516 mask = val;
517 break;
518
519 case 1:
520 val = (ESR_INT_SRDY0_P1 | ESR_INT_DET0_P1);
521 mask = val;
522 break;
523
524 default:
525 return -EINVAL;
526 }
527
528 while (max_retry--) {
529 sig = nr64(ESR_INT_SIGNALS);
530 if ((sig & mask) == val)
531 break;
532
533 mdelay(500);
534 }
535
536 if ((sig & mask) != val) {
537 dev_err(np->device, PFX "Port %u signal bits [%08x] are not "
538 "[%08x]\n", np->port, (int) (sig & mask), (int) val);
539 return -ENODEV;
540 }
541
542 return 0;
543}
544
545static int serdes_init_niu_10g_serdes(struct niu *np)
546{
547 struct niu_link_config *lp = &np->link_config;
548 u32 tx_cfg, rx_cfg, pll_cfg, pll_sts;
549 int max_retry = 100;
550 u64 sig, mask, val;
551 unsigned long i;
552 int err;
553
554 tx_cfg = (PLL_TX_CFG_ENTX | PLL_TX_CFG_SWING_1375MV);
555 rx_cfg = (PLL_RX_CFG_ENRX | PLL_RX_CFG_TERM_0P8VDDT |
556 PLL_RX_CFG_ALIGN_ENA | PLL_RX_CFG_LOS_LTHRESH |
557 PLL_RX_CFG_EQ_LP_ADAPTIVE);
558
559 if (lp->loopback_mode == LOOPBACK_PHY) {
560 u16 test_cfg = PLL_TEST_CFG_LOOPBACK_CML_DIS;
561
562 mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
563 ESR2_TI_PLL_TEST_CFG_L, test_cfg);
564
565 tx_cfg |= PLL_TX_CFG_ENTEST;
566 rx_cfg |= PLL_RX_CFG_ENTEST;
567 }
568
569 /* Initialize PLL for 10G */
570 pll_cfg = (PLL_CFG_ENPLL | PLL_CFG_MPY_10X);
571
572 err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
573 ESR2_TI_PLL_CFG_L, pll_cfg & 0xffff);
574 if (err) {
575 dev_err(np->device, PFX "NIU Port %d "
576 "serdes_init_niu_10g_serdes: "
577 "mdio write to ESR2_TI_PLL_CFG_L failed", np->port);
578 return err;
579 }
580
581 pll_sts = PLL_CFG_ENPLL;
582
583 err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
584 ESR2_TI_PLL_STS_L, pll_sts & 0xffff);
585 if (err) {
586 dev_err(np->device, PFX "NIU Port %d "
587 "serdes_init_niu_10g_serdes: "
588 "mdio write to ESR2_TI_PLL_STS_L failed", np->port);
589 return err;
590 }
591
592 udelay(200);
593
594 /* Initialize all 4 lanes of the SERDES. */
595 for (i = 0; i < 4; i++) {
596 err = esr2_set_tx_cfg(np, i, tx_cfg);
597 if (err)
598 return err;
599 }
600
601 for (i = 0; i < 4; i++) {
602 err = esr2_set_rx_cfg(np, i, rx_cfg);
603 if (err)
604 return err;
605 }
606
607 /* check if serdes is ready */
608
609 switch (np->port) {
610 case 0:
611 mask = ESR_INT_SIGNALS_P0_BITS;
612 val = (ESR_INT_SRDY0_P0 |
613 ESR_INT_DET0_P0 |
614 ESR_INT_XSRDY_P0 |
615 ESR_INT_XDP_P0_CH3 |
616 ESR_INT_XDP_P0_CH2 |
617 ESR_INT_XDP_P0_CH1 |
618 ESR_INT_XDP_P0_CH0);
619 break;
620
621 case 1:
622 mask = ESR_INT_SIGNALS_P1_BITS;
623 val = (ESR_INT_SRDY0_P1 |
624 ESR_INT_DET0_P1 |
625 ESR_INT_XSRDY_P1 |
626 ESR_INT_XDP_P1_CH3 |
627 ESR_INT_XDP_P1_CH2 |
628 ESR_INT_XDP_P1_CH1 |
629 ESR_INT_XDP_P1_CH0);
630 break;
631
632 default:
633 return -EINVAL;
634 }
635
636 while (max_retry--) {
637 sig = nr64(ESR_INT_SIGNALS);
638 if ((sig & mask) == val)
639 break;
640
641 mdelay(500);
642 }
643
644 if ((sig & mask) != val) {
645 pr_info(PFX "NIU Port %u signal bits [%08x] are not "
646 "[%08x] for 10G...trying 1G\n",
647 np->port, (int) (sig & mask), (int) val);
648
649 /* 10G failed, try initializing at 1G */
650 err = serdes_init_niu_1g_serdes(np);
651 if (!err) {
652 np->flags &= ~NIU_FLAGS_10G;
653 np->mac_xcvr = MAC_XCVR_PCS;
654 } else {
655 dev_err(np->device, PFX "Port %u 10G/1G SERDES "
656 "Link Failed \n", np->port);
657 return -ENODEV;
658 }
659 }
660 return 0;
661}
662
447static int esr_read_rxtx_ctrl(struct niu *np, unsigned long chan, u32 *val) 663static int esr_read_rxtx_ctrl(struct niu *np, unsigned long chan, u32 *val)
448{ 664{
449 int err; 665 int err;
@@ -1955,13 +2171,23 @@ static const struct niu_phy_ops phy_ops_10g_serdes = {
1955 .link_status = link_status_10g_serdes, 2171 .link_status = link_status_10g_serdes,
1956}; 2172};
1957 2173
2174static const struct niu_phy_ops phy_ops_10g_serdes_niu = {
2175 .serdes_init = serdes_init_niu_10g_serdes,
2176 .link_status = link_status_10g_serdes,
2177};
2178
2179static const struct niu_phy_ops phy_ops_1g_serdes_niu = {
2180 .serdes_init = serdes_init_niu_1g_serdes,
2181 .link_status = link_status_1g_serdes,
2182};
2183
1958static const struct niu_phy_ops phy_ops_1g_rgmii = { 2184static const struct niu_phy_ops phy_ops_1g_rgmii = {
1959 .xcvr_init = xcvr_init_1g_rgmii, 2185 .xcvr_init = xcvr_init_1g_rgmii,
1960 .link_status = link_status_1g_rgmii, 2186 .link_status = link_status_1g_rgmii,
1961}; 2187};
1962 2188
1963static const struct niu_phy_ops phy_ops_10g_fiber_niu = { 2189static const struct niu_phy_ops phy_ops_10g_fiber_niu = {
1964 .serdes_init = serdes_init_niu, 2190 .serdes_init = serdes_init_niu_10g_fiber,
1965 .xcvr_init = xcvr_init_10g, 2191 .xcvr_init = xcvr_init_10g,
1966 .link_status = link_status_10g, 2192 .link_status = link_status_10g,
1967}; 2193};
@@ -1999,11 +2225,21 @@ struct niu_phy_template {
1999 u32 phy_addr_base; 2225 u32 phy_addr_base;
2000}; 2226};
2001 2227
2002static const struct niu_phy_template phy_template_niu = { 2228static const struct niu_phy_template phy_template_niu_10g_fiber = {
2003 .ops = &phy_ops_10g_fiber_niu, 2229 .ops = &phy_ops_10g_fiber_niu,
2004 .phy_addr_base = 16, 2230 .phy_addr_base = 16,
2005}; 2231};
2006 2232
2233static const struct niu_phy_template phy_template_niu_10g_serdes = {
2234 .ops = &phy_ops_10g_serdes_niu,
2235 .phy_addr_base = 0,
2236};
2237
2238static const struct niu_phy_template phy_template_niu_1g_serdes = {
2239 .ops = &phy_ops_1g_serdes_niu,
2240 .phy_addr_base = 0,
2241};
2242
2007static const struct niu_phy_template phy_template_10g_fiber = { 2243static const struct niu_phy_template phy_template_10g_fiber = {
2008 .ops = &phy_ops_10g_fiber, 2244 .ops = &phy_ops_10g_fiber,
2009 .phy_addr_base = 8, 2245 .phy_addr_base = 8,
@@ -2183,8 +2419,25 @@ static int niu_determine_phy_disposition(struct niu *np)
2183 u32 phy_addr_off = 0; 2419 u32 phy_addr_off = 0;
2184 2420
2185 if (plat_type == PLAT_TYPE_NIU) { 2421 if (plat_type == PLAT_TYPE_NIU) {
2186 tp = &phy_template_niu; 2422 switch (np->flags &
2187 phy_addr_off += np->port; 2423 (NIU_FLAGS_10G |
2424 NIU_FLAGS_FIBER |
2425 NIU_FLAGS_XCVR_SERDES)) {
2426 case NIU_FLAGS_10G | NIU_FLAGS_XCVR_SERDES:
2427 /* 10G Serdes */
2428 tp = &phy_template_niu_10g_serdes;
2429 break;
2430 case NIU_FLAGS_XCVR_SERDES:
2431 /* 1G Serdes */
2432 tp = &phy_template_niu_1g_serdes;
2433 break;
2434 case NIU_FLAGS_10G | NIU_FLAGS_FIBER:
2435 /* 10G Fiber */
2436 default:
2437 tp = &phy_template_niu_10g_fiber;
2438 phy_addr_off += np->port;
2439 break;
2440 }
2188 } else { 2441 } else {
2189 switch (np->flags & 2442 switch (np->flags &
2190 (NIU_FLAGS_10G | 2443 (NIU_FLAGS_10G |
@@ -7214,6 +7467,12 @@ static int __devinit niu_phy_type_prop_decode(struct niu *np,
7214 np->flags |= NIU_FLAGS_10G; 7467 np->flags |= NIU_FLAGS_10G;
7215 np->flags &= ~NIU_FLAGS_FIBER; 7468 np->flags &= ~NIU_FLAGS_FIBER;
7216 np->mac_xcvr = MAC_XCVR_XPCS; 7469 np->mac_xcvr = MAC_XCVR_XPCS;
7470 } else if (!strcmp(phy_prop, "xgsd") || !strcmp(phy_prop, "gsd")) {
7471 /* 10G Serdes or 1G Serdes, default to 10G */
7472 np->flags |= NIU_FLAGS_10G;
7473 np->flags &= ~NIU_FLAGS_FIBER;
7474 np->flags |= NIU_FLAGS_XCVR_SERDES;
7475 np->mac_xcvr = MAC_XCVR_XPCS;
7217 } else { 7476 } else {
7218 return -EINVAL; 7477 return -EINVAL;
7219 } 7478 }
@@ -7742,6 +8001,8 @@ static int __devinit walk_phys(struct niu *np, struct niu_parent *parent)
7742 u32 val; 8001 u32 val;
7743 int err; 8002 int err;
7744 8003
8004 num_10g = num_1g = 0;
8005
7745 if (!strcmp(np->vpd.model, NIU_ALONSO_MDL_STR) || 8006 if (!strcmp(np->vpd.model, NIU_ALONSO_MDL_STR) ||
7746 !strcmp(np->vpd.model, NIU_KIMI_MDL_STR)) { 8007 !strcmp(np->vpd.model, NIU_KIMI_MDL_STR)) {
7747 num_10g = 0; 8008 num_10g = 0;
@@ -7758,6 +8019,16 @@ static int __devinit walk_phys(struct niu *np, struct niu_parent *parent)
7758 parent->num_ports = 2; 8019 parent->num_ports = 2;
7759 val = (phy_encode(PORT_TYPE_10G, 0) | 8020 val = (phy_encode(PORT_TYPE_10G, 0) |
7760 phy_encode(PORT_TYPE_10G, 1)); 8021 phy_encode(PORT_TYPE_10G, 1));
8022 } else if ((np->flags & NIU_FLAGS_XCVR_SERDES) &&
8023 (parent->plat_type == PLAT_TYPE_NIU)) {
8024 /* this is the Monza case */
8025 if (np->flags & NIU_FLAGS_10G) {
8026 val = (phy_encode(PORT_TYPE_10G, 0) |
8027 phy_encode(PORT_TYPE_10G, 1));
8028 } else {
8029 val = (phy_encode(PORT_TYPE_1G, 0) |
8030 phy_encode(PORT_TYPE_1G, 1));
8031 }
7761 } else { 8032 } else {
7762 err = fill_phy_probe_info(np, parent, info); 8033 err = fill_phy_probe_info(np, parent, info);
7763 if (err) 8034 if (err)
@@ -8657,7 +8928,9 @@ static void __devinit niu_device_announce(struct niu *np)
8657 dev->name, 8928 dev->name,
8658 (np->flags & NIU_FLAGS_XMAC ? "XMAC" : "BMAC"), 8929 (np->flags & NIU_FLAGS_XMAC ? "XMAC" : "BMAC"),
8659 (np->flags & NIU_FLAGS_10G ? "10G" : "1G"), 8930 (np->flags & NIU_FLAGS_10G ? "10G" : "1G"),
8660 (np->flags & NIU_FLAGS_FIBER ? "FIBER" : "COPPER"), 8931 (np->flags & NIU_FLAGS_FIBER ? "FIBER" :
8932 (np->flags & NIU_FLAGS_XCVR_SERDES ? "SERDES" :
8933 "COPPER")),
8661 (np->mac_xcvr == MAC_XCVR_MII ? "MII" : 8934 (np->mac_xcvr == MAC_XCVR_MII ? "MII" :
8662 (np->mac_xcvr == MAC_XCVR_PCS ? "PCS" : "XPCS")), 8935 (np->mac_xcvr == MAC_XCVR_PCS ? "PCS" : "XPCS")),
8663 np->vpd.phy_type); 8936 np->vpd.phy_type);
@@ -8667,7 +8940,6 @@ static void __devinit niu_device_announce(struct niu *np)
8667static int __devinit niu_pci_init_one(struct pci_dev *pdev, 8940static int __devinit niu_pci_init_one(struct pci_dev *pdev,
8668 const struct pci_device_id *ent) 8941 const struct pci_device_id *ent)
8669{ 8942{
8670 unsigned long niureg_base, niureg_len;
8671 union niu_parent_id parent_id; 8943 union niu_parent_id parent_id;
8672 struct net_device *dev; 8944 struct net_device *dev;
8673 struct niu *np; 8945 struct niu *np;
@@ -8758,10 +9030,7 @@ static int __devinit niu_pci_init_one(struct pci_dev *pdev,
8758 9030
8759 dev->features |= (NETIF_F_SG | NETIF_F_HW_CSUM); 9031 dev->features |= (NETIF_F_SG | NETIF_F_HW_CSUM);
8760 9032
8761 niureg_base = pci_resource_start(pdev, 0); 9033 np->regs = pci_ioremap_bar(pdev, 0);
8762 niureg_len = pci_resource_len(pdev, 0);
8763
8764 np->regs = ioremap_nocache(niureg_base, niureg_len);
8765 if (!np->regs) { 9034 if (!np->regs) {
8766 dev_err(&pdev->dev, PFX "Cannot map device registers, " 9035 dev_err(&pdev->dev, PFX "Cannot map device registers, "
8767 "aborting.\n"); 9036 "aborting.\n");
diff --git a/drivers/net/niu.h b/drivers/net/niu.h
index c6fa883daa22..180ca8ae93de 100644
--- a/drivers/net/niu.h
+++ b/drivers/net/niu.h
@@ -1048,6 +1048,13 @@
1048#define PLL_CFG_LD_SHIFT 8 1048#define PLL_CFG_LD_SHIFT 8
1049#define PLL_CFG_MPY 0x0000001e 1049#define PLL_CFG_MPY 0x0000001e
1050#define PLL_CFG_MPY_SHIFT 1 1050#define PLL_CFG_MPY_SHIFT 1
1051#define PLL_CFG_MPY_4X 0x0
1052#define PLL_CFG_MPY_5X 0x00000002
1053#define PLL_CFG_MPY_6X 0x00000004
1054#define PLL_CFG_MPY_8X 0x00000008
1055#define PLL_CFG_MPY_10X 0x0000000a
1056#define PLL_CFG_MPY_12X 0x0000000c
1057#define PLL_CFG_MPY_12P5X 0x0000000e
1051#define PLL_CFG_ENPLL 0x00000001 1058#define PLL_CFG_ENPLL 0x00000001
1052 1059
1053#define ESR2_TI_PLL_STS_L (ESR2_BASE + 0x002) 1060#define ESR2_TI_PLL_STS_L (ESR2_BASE + 0x002)
@@ -1093,6 +1100,9 @@
1093#define PLL_TX_CFG_INVPAIR 0x00000080 1100#define PLL_TX_CFG_INVPAIR 0x00000080
1094#define PLL_TX_CFG_RATE 0x00000060 1101#define PLL_TX_CFG_RATE 0x00000060
1095#define PLL_TX_CFG_RATE_SHIFT 5 1102#define PLL_TX_CFG_RATE_SHIFT 5
1103#define PLL_TX_CFG_RATE_FULL 0x0
1104#define PLL_TX_CFG_RATE_HALF 0x20
1105#define PLL_TX_CFG_RATE_QUAD 0x40
1096#define PLL_TX_CFG_BUSWIDTH 0x0000001c 1106#define PLL_TX_CFG_BUSWIDTH 0x0000001c
1097#define PLL_TX_CFG_BUSWIDTH_SHIFT 2 1107#define PLL_TX_CFG_BUSWIDTH_SHIFT 2
1098#define PLL_TX_CFG_ENTEST 0x00000002 1108#define PLL_TX_CFG_ENTEST 0x00000002
@@ -1132,6 +1142,9 @@
1132#define PLL_RX_CFG_INVPAIR 0x00000080 1142#define PLL_RX_CFG_INVPAIR 0x00000080
1133#define PLL_RX_CFG_RATE 0x00000060 1143#define PLL_RX_CFG_RATE 0x00000060
1134#define PLL_RX_CFG_RATE_SHIFT 5 1144#define PLL_RX_CFG_RATE_SHIFT 5
1145#define PLL_RX_CFG_RATE_FULL 0x0
1146#define PLL_RX_CFG_RATE_HALF 0x20
1147#define PLL_RX_CFG_RATE_QUAD 0x40
1135#define PLL_RX_CFG_BUSWIDTH 0x0000001c 1148#define PLL_RX_CFG_BUSWIDTH 0x0000001c
1136#define PLL_RX_CFG_BUSWIDTH_SHIFT 2 1149#define PLL_RX_CFG_BUSWIDTH_SHIFT 2
1137#define PLL_RX_CFG_ENTEST 0x00000002 1150#define PLL_RX_CFG_ENTEST 0x00000002
diff --git a/drivers/net/pcmcia/axnet_cs.c b/drivers/net/pcmcia/axnet_cs.c
index b37a498939ae..0418045166c3 100644
--- a/drivers/net/pcmcia/axnet_cs.c
+++ b/drivers/net/pcmcia/axnet_cs.c
@@ -779,6 +779,7 @@ static struct pcmcia_device_id axnet_ids[] = {
779 PCMCIA_DEVICE_PROD_ID12("IO DATA", "ETXPCM", 0x547e66dc, 0x233adac2), 779 PCMCIA_DEVICE_PROD_ID12("IO DATA", "ETXPCM", 0x547e66dc, 0x233adac2),
780 PCMCIA_DEVICE_PROD_ID12("Linksys", "EtherFast 10/100 PC Card (PCMPC100 V3)", 0x0733cc81, 0x232019a8), 780 PCMCIA_DEVICE_PROD_ID12("Linksys", "EtherFast 10/100 PC Card (PCMPC100 V3)", 0x0733cc81, 0x232019a8),
781 PCMCIA_DEVICE_PROD_ID12("MELCO", "LPC3-TX", 0x481e0094, 0xf91af609), 781 PCMCIA_DEVICE_PROD_ID12("MELCO", "LPC3-TX", 0x481e0094, 0xf91af609),
782 PCMCIA_DEVICE_PROD_ID12("NETGEAR", "FA411", 0x9aa79dc3, 0x40fad875),
782 PCMCIA_DEVICE_PROD_ID12("PCMCIA", "100BASE", 0x281f1c5d, 0x7c2add04), 783 PCMCIA_DEVICE_PROD_ID12("PCMCIA", "100BASE", 0x281f1c5d, 0x7c2add04),
783 PCMCIA_DEVICE_PROD_ID12("PCMCIA", "FastEtherCard", 0x281f1c5d, 0x7ef26116), 784 PCMCIA_DEVICE_PROD_ID12("PCMCIA", "FastEtherCard", 0x281f1c5d, 0x7ef26116),
784 PCMCIA_DEVICE_PROD_ID12("PCMCIA", "FEP501", 0x281f1c5d, 0x2e272058), 785 PCMCIA_DEVICE_PROD_ID12("PCMCIA", "FEP501", 0x281f1c5d, 0x2e272058),
@@ -1174,7 +1175,6 @@ static int ei_start_xmit(struct sk_buff *skb, struct net_device *dev)
1174 * ax_interrupt - handle the interrupts from an 8390 1175 * ax_interrupt - handle the interrupts from an 8390
1175 * @irq: interrupt number 1176 * @irq: interrupt number
1176 * @dev_id: a pointer to the net_device 1177 * @dev_id: a pointer to the net_device
1177 * @regs: unused
1178 * 1178 *
1179 * Handle the ether interface interrupts. We pull packets from 1179 * Handle the ether interface interrupts. We pull packets from
1180 * the 8390 via the card specific functions and fire them at the networking 1180 * the 8390 via the card specific functions and fire them at the networking
diff --git a/drivers/net/pcmcia/ibmtr_cs.c b/drivers/net/pcmcia/ibmtr_cs.c
index cf3cca4642f2..f51944b28cfa 100644
--- a/drivers/net/pcmcia/ibmtr_cs.c
+++ b/drivers/net/pcmcia/ibmtr_cs.c
@@ -349,7 +349,7 @@ static int ibmtr_suspend(struct pcmcia_device *link)
349 return 0; 349 return 0;
350} 350}
351 351
352static int ibmtr_resume(struct pcmcia_device *link) 352static int __devinit ibmtr_resume(struct pcmcia_device *link)
353{ 353{
354 ibmtr_dev_t *info = link->priv; 354 ibmtr_dev_t *info = link->priv;
355 struct net_device *dev = info->dev; 355 struct net_device *dev = info->dev;
diff --git a/drivers/net/pcmcia/pcnet_cs.c b/drivers/net/pcmcia/pcnet_cs.c
index e40d6301aa7a..ce486f094492 100644
--- a/drivers/net/pcmcia/pcnet_cs.c
+++ b/drivers/net/pcmcia/pcnet_cs.c
@@ -1693,7 +1693,6 @@ static struct pcmcia_device_id pcnet_ids[] = {
1693 PCMCIA_DEVICE_PROD_ID12("National Semiconductor", "InfoMover NE4100", 0x36e1191f, 0xa6617ec8), 1693 PCMCIA_DEVICE_PROD_ID12("National Semiconductor", "InfoMover NE4100", 0x36e1191f, 0xa6617ec8),
1694 PCMCIA_DEVICE_PROD_ID12("NEC", "PC-9801N-J12", 0x18df0ba0, 0xbc912d76), 1694 PCMCIA_DEVICE_PROD_ID12("NEC", "PC-9801N-J12", 0x18df0ba0, 0xbc912d76),
1695 PCMCIA_DEVICE_PROD_ID12("NETGEAR", "FA410TX", 0x9aa79dc3, 0x60e5bc0e), 1695 PCMCIA_DEVICE_PROD_ID12("NETGEAR", "FA410TX", 0x9aa79dc3, 0x60e5bc0e),
1696 PCMCIA_DEVICE_PROD_ID12("NETGEAR", "FA411", 0x9aa79dc3, 0x40fad875),
1697 PCMCIA_DEVICE_PROD_ID12("Network Everywhere", "Fast Ethernet 10/100 PC Card", 0x820a67b6, 0x31ed1a5f), 1696 PCMCIA_DEVICE_PROD_ID12("Network Everywhere", "Fast Ethernet 10/100 PC Card", 0x820a67b6, 0x31ed1a5f),
1698 PCMCIA_DEVICE_PROD_ID12("NextCom K.K.", "Next Hawk", 0xaedaec74, 0xad050ef1), 1697 PCMCIA_DEVICE_PROD_ID12("NextCom K.K.", "Next Hawk", 0xaedaec74, 0xad050ef1),
1699 PCMCIA_DEVICE_PROD_ID12("PCMCIA", "10/100Mbps Ethernet Card", 0x281f1c5d, 0x6e41773b), 1698 PCMCIA_DEVICE_PROD_ID12("PCMCIA", "10/100Mbps Ethernet Card", 0x281f1c5d, 0x6e41773b),
diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
index 4aa547947040..eb6411c4694f 100644
--- a/drivers/net/phy/marvell.c
+++ b/drivers/net/phy/marvell.c
@@ -227,6 +227,59 @@ static int m88e1111_config_init(struct phy_device *phydev)
227 return 0; 227 return 0;
228} 228}
229 229
230static int m88e1118_config_aneg(struct phy_device *phydev)
231{
232 int err;
233
234 err = phy_write(phydev, MII_BMCR, BMCR_RESET);
235 if (err < 0)
236 return err;
237
238 err = phy_write(phydev, MII_M1011_PHY_SCR,
239 MII_M1011_PHY_SCR_AUTO_CROSS);
240 if (err < 0)
241 return err;
242
243 err = genphy_config_aneg(phydev);
244 return 0;
245}
246
247static int m88e1118_config_init(struct phy_device *phydev)
248{
249 int err;
250
251 /* Change address */
252 err = phy_write(phydev, 0x16, 0x0002);
253 if (err < 0)
254 return err;
255
256 /* Enable 1000 Mbit */
257 err = phy_write(phydev, 0x15, 0x1070);
258 if (err < 0)
259 return err;
260
261 /* Change address */
262 err = phy_write(phydev, 0x16, 0x0003);
263 if (err < 0)
264 return err;
265
266 /* Adjust LED Control */
267 err = phy_write(phydev, 0x10, 0x021e);
268 if (err < 0)
269 return err;
270
271 /* Reset address */
272 err = phy_write(phydev, 0x16, 0x0);
273 if (err < 0)
274 return err;
275
276 err = phy_write(phydev, MII_BMCR, BMCR_RESET);
277 if (err < 0)
278 return err;
279
280 return 0;
281}
282
230static int m88e1145_config_init(struct phy_device *phydev) 283static int m88e1145_config_init(struct phy_device *phydev)
231{ 284{
232 int err; 285 int err;
@@ -416,6 +469,19 @@ static struct phy_driver marvell_drivers[] = {
416 .driver = { .owner = THIS_MODULE }, 469 .driver = { .owner = THIS_MODULE },
417 }, 470 },
418 { 471 {
472 .phy_id = 0x01410e10,
473 .phy_id_mask = 0xfffffff0,
474 .name = "Marvell 88E1118",
475 .features = PHY_GBIT_FEATURES,
476 .flags = PHY_HAS_INTERRUPT,
477 .config_init = &m88e1118_config_init,
478 .config_aneg = &m88e1118_config_aneg,
479 .read_status = &genphy_read_status,
480 .ack_interrupt = &marvell_ack_interrupt,
481 .config_intr = &marvell_config_intr,
482 .driver = {.owner = THIS_MODULE,},
483 },
484 {
419 .phy_id = 0x01410cd0, 485 .phy_id = 0x01410cd0,
420 .phy_id_mask = 0xfffffff0, 486 .phy_id_mask = 0xfffffff0,
421 .name = "Marvell 88E1145", 487 .name = "Marvell 88E1145",
diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
index d0ed1ef284a8..289fc267edf3 100644
--- a/drivers/net/phy/mdio_bus.c
+++ b/drivers/net/phy/mdio_bus.c
@@ -105,8 +105,6 @@ int mdiobus_register(struct mii_bus *bus)
105 return -EINVAL; 105 return -EINVAL;
106 } 106 }
107 107
108 bus->state = MDIOBUS_REGISTERED;
109
110 mutex_init(&bus->mdio_lock); 108 mutex_init(&bus->mdio_lock);
111 109
112 if (bus->reset) 110 if (bus->reset)
@@ -123,6 +121,9 @@ int mdiobus_register(struct mii_bus *bus)
123 } 121 }
124 } 122 }
125 123
124 if (!err)
125 bus->state = MDIOBUS_REGISTERED;
126
126 pr_info("%s: probed\n", bus->name); 127 pr_info("%s: probed\n", bus->name);
127 128
128 return err; 129 return err;
@@ -136,7 +137,7 @@ void mdiobus_unregister(struct mii_bus *bus)
136 BUG_ON(bus->state != MDIOBUS_REGISTERED); 137 BUG_ON(bus->state != MDIOBUS_REGISTERED);
137 bus->state = MDIOBUS_UNREGISTERED; 138 bus->state = MDIOBUS_UNREGISTERED;
138 139
139 device_unregister(&bus->dev); 140 device_del(&bus->dev);
140 for (i = 0; i < PHY_MAX_ADDR; i++) { 141 for (i = 0; i < PHY_MAX_ADDR; i++) {
141 if (bus->phy_map[i]) 142 if (bus->phy_map[i])
142 device_unregister(&bus->phy_map[i]->dev); 143 device_unregister(&bus->phy_map[i]->dev);
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index e11b03b2b25a..25acbbde4a60 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -227,8 +227,17 @@ struct phy_device * get_phy_device(struct mii_bus *bus, int addr)
227 if (r) 227 if (r)
228 return ERR_PTR(r); 228 return ERR_PTR(r);
229 229
230 /* If the phy_id is all Fs, there is no device there */ 230 /* If the phy_id is mostly Fs, there is no device there */
231 if (0xffffffff == phy_id) 231 if ((phy_id & 0x1fffffff) == 0x1fffffff)
232 return NULL;
233
234 /*
235 * Broken hardware is sometimes missing the pull down resistor on the
236 * MDIO line, which results in reads to non-existent devices returning
237 * 0 rather than 0xffff. Catch this here and treat 0 as a non-existent
238 * device as well.
239 */
240 if (phy_id == 0)
232 return NULL; 241 return NULL;
233 242
234 dev = phy_device_create(bus, addr, phy_id); 243 dev = phy_device_create(bus, addr, phy_id);
@@ -564,20 +573,32 @@ EXPORT_SYMBOL(genphy_restart_aneg);
564 */ 573 */
565int genphy_config_aneg(struct phy_device *phydev) 574int genphy_config_aneg(struct phy_device *phydev)
566{ 575{
567 int result = 0; 576 int result;
568 577
569 if (AUTONEG_ENABLE == phydev->autoneg) { 578 if (AUTONEG_ENABLE != phydev->autoneg)
570 int result = genphy_config_advert(phydev); 579 return genphy_setup_forced(phydev);
571 580
572 if (result < 0) /* error */ 581 result = genphy_config_advert(phydev);
573 return result; 582
583 if (result < 0) /* error */
584 return result;
585
586 if (result == 0) {
587 /* Advertisment hasn't changed, but maybe aneg was never on to
588 * begin with? Or maybe phy was isolated? */
589 int ctl = phy_read(phydev, MII_BMCR);
590
591 if (ctl < 0)
592 return ctl;
593
594 if (!(ctl & BMCR_ANENABLE) || (ctl & BMCR_ISOLATE))
595 result = 1; /* do restart aneg */
596 }
574 597
575 /* Only restart aneg if we are advertising something different 598 /* Only restart aneg if we are advertising something different
576 * than we were before. */ 599 * than we were before. */
577 if (result > 0) 600 if (result > 0)
578 result = genphy_restart_aneg(phydev); 601 result = genphy_restart_aneg(phydev);
579 } else
580 result = genphy_setup_forced(phydev);
581 602
582 return result; 603 return result;
583} 604}
diff --git a/drivers/net/phy/vitesse.c b/drivers/net/phy/vitesse.c
index 8874497b6bbf..dd3b2447e85a 100644
--- a/drivers/net/phy/vitesse.c
+++ b/drivers/net/phy/vitesse.c
@@ -34,6 +34,8 @@
34#define MII_VSC8244_IMASK_DUPLEX 0x1000 34#define MII_VSC8244_IMASK_DUPLEX 0x1000
35#define MII_VSC8244_IMASK_MASK 0xf000 35#define MII_VSC8244_IMASK_MASK 0xf000
36 36
37#define MII_VSC8221_IMASK_MASK 0xa000
38
37/* Vitesse Interrupt Status Register */ 39/* Vitesse Interrupt Status Register */
38#define MII_VSC8244_ISTAT 0x1a 40#define MII_VSC8244_ISTAT 0x1a
39#define MII_VSC8244_ISTAT_STATUS 0x8000 41#define MII_VSC8244_ISTAT_STATUS 0x8000
@@ -49,6 +51,12 @@
49#define MII_VSC8244_AUXCONSTAT_GBIT 0x0010 51#define MII_VSC8244_AUXCONSTAT_GBIT 0x0010
50#define MII_VSC8244_AUXCONSTAT_100 0x0008 52#define MII_VSC8244_AUXCONSTAT_100 0x0008
51 53
54#define MII_VSC8221_AUXCONSTAT_INIT 0x0004 /* need to set this bit? */
55#define MII_VSC8221_AUXCONSTAT_RESERVED 0x0004
56
57#define PHY_ID_VSC8244 0x000fc6c0
58#define PHY_ID_VSC8221 0x000fc550
59
52MODULE_DESCRIPTION("Vitesse PHY driver"); 60MODULE_DESCRIPTION("Vitesse PHY driver");
53MODULE_AUTHOR("Kriston Carson"); 61MODULE_AUTHOR("Kriston Carson");
54MODULE_LICENSE("GPL"); 62MODULE_LICENSE("GPL");
@@ -95,13 +103,15 @@ static int vsc824x_ack_interrupt(struct phy_device *phydev)
95 return (err < 0) ? err : 0; 103 return (err < 0) ? err : 0;
96} 104}
97 105
98static int vsc824x_config_intr(struct phy_device *phydev) 106static int vsc82xx_config_intr(struct phy_device *phydev)
99{ 107{
100 int err; 108 int err;
101 109
102 if (phydev->interrupts == PHY_INTERRUPT_ENABLED) 110 if (phydev->interrupts == PHY_INTERRUPT_ENABLED)
103 err = phy_write(phydev, MII_VSC8244_IMASK, 111 err = phy_write(phydev, MII_VSC8244_IMASK,
104 MII_VSC8244_IMASK_MASK); 112 phydev->drv->phy_id == PHY_ID_VSC8244 ?
113 MII_VSC8244_IMASK_MASK :
114 MII_VSC8221_IMASK_MASK);
105 else { 115 else {
106 /* 116 /*
107 * The Vitesse PHY cannot clear the interrupt 117 * The Vitesse PHY cannot clear the interrupt
@@ -120,7 +130,7 @@ static int vsc824x_config_intr(struct phy_device *phydev)
120 130
121/* Vitesse 824x */ 131/* Vitesse 824x */
122static struct phy_driver vsc8244_driver = { 132static struct phy_driver vsc8244_driver = {
123 .phy_id = 0x000fc6c0, 133 .phy_id = PHY_ID_VSC8244,
124 .name = "Vitesse VSC8244", 134 .name = "Vitesse VSC8244",
125 .phy_id_mask = 0x000fffc0, 135 .phy_id_mask = 0x000fffc0,
126 .features = PHY_GBIT_FEATURES, 136 .features = PHY_GBIT_FEATURES,
@@ -129,19 +139,55 @@ static struct phy_driver vsc8244_driver = {
129 .config_aneg = &genphy_config_aneg, 139 .config_aneg = &genphy_config_aneg,
130 .read_status = &genphy_read_status, 140 .read_status = &genphy_read_status,
131 .ack_interrupt = &vsc824x_ack_interrupt, 141 .ack_interrupt = &vsc824x_ack_interrupt,
132 .config_intr = &vsc824x_config_intr, 142 .config_intr = &vsc82xx_config_intr,
133 .driver = { .owner = THIS_MODULE,}, 143 .driver = { .owner = THIS_MODULE,},
134}; 144};
135 145
136static int __init vsc8244_init(void) 146static int vsc8221_config_init(struct phy_device *phydev)
137{ 147{
138 return phy_driver_register(&vsc8244_driver); 148 int err;
149
150 err = phy_write(phydev, MII_VSC8244_AUX_CONSTAT,
151 MII_VSC8221_AUXCONSTAT_INIT);
152 return err;
153
154 /* Perhaps we should set EXT_CON1 based on the interface?
155 Options are 802.3Z SerDes or SGMII */
156}
157
158/* Vitesse 8221 */
159static struct phy_driver vsc8221_driver = {
160 .phy_id = PHY_ID_VSC8221,
161 .phy_id_mask = 0x000ffff0,
162 .name = "Vitesse VSC8221",
163 .features = PHY_GBIT_FEATURES,
164 .flags = PHY_HAS_INTERRUPT,
165 .config_init = &vsc8221_config_init,
166 .config_aneg = &genphy_config_aneg,
167 .read_status = &genphy_read_status,
168 .ack_interrupt = &vsc824x_ack_interrupt,
169 .config_intr = &vsc82xx_config_intr,
170 .driver = { .owner = THIS_MODULE,},
171};
172
173static int __init vsc82xx_init(void)
174{
175 int err;
176
177 err = phy_driver_register(&vsc8244_driver);
178 if (err < 0)
179 return err;
180 err = phy_driver_register(&vsc8221_driver);
181 if (err < 0)
182 phy_driver_unregister(&vsc8244_driver);
183 return err;
139} 184}
140 185
141static void __exit vsc8244_exit(void) 186static void __exit vsc82xx_exit(void)
142{ 187{
143 phy_driver_unregister(&vsc8244_driver); 188 phy_driver_unregister(&vsc8244_driver);
189 phy_driver_unregister(&vsc8221_driver);
144} 190}
145 191
146module_init(vsc8244_init); 192module_init(vsc82xx_init);
147module_exit(vsc8244_exit); 193module_exit(vsc82xx_exit);
diff --git a/drivers/net/pppoe.c b/drivers/net/pppoe.c
index fc6f4b8c64b3..b646e92134dc 100644
--- a/drivers/net/pppoe.c
+++ b/drivers/net/pppoe.c
@@ -399,11 +399,11 @@ static int pppoe_rcv(struct sk_buff *skb,
399 if (skb->len < len) 399 if (skb->len < len)
400 goto drop; 400 goto drop;
401 401
402 po = get_item(ph->sid, eth_hdr(skb)->h_source, dev->ifindex); 402 if (pskb_trim_rcsum(skb, len))
403 if (!po)
404 goto drop; 403 goto drop;
405 404
406 if (pskb_trim_rcsum(skb, len)) 405 po = get_item(ph->sid, eth_hdr(skb)->h_source, dev->ifindex);
406 if (!po)
407 goto drop; 407 goto drop;
408 408
409 return sk_receive_skb(sk_pppox(po), skb, 0); 409 return sk_receive_skb(sk_pppox(po), skb, 0);
diff --git a/drivers/net/pppol2tp.c b/drivers/net/pppol2tp.c
index 185b1dff10a8..e98d9773158d 100644
--- a/drivers/net/pppol2tp.c
+++ b/drivers/net/pppol2tp.c
@@ -1353,6 +1353,7 @@ static int pppol2tp_release(struct socket *sock)
1353 kfree_skb(skb); 1353 kfree_skb(skb);
1354 sock_put(sk); 1354 sock_put(sk);
1355 } 1355 }
1356 sock_put(sk);
1356 } 1357 }
1357 1358
1358 release_sock(sk); 1359 release_sock(sk);
diff --git a/drivers/net/qla3xxx.c b/drivers/net/qla3xxx.c
index 3cdd07c45b6d..508452c02151 100644
--- a/drivers/net/qla3xxx.c
+++ b/drivers/net/qla3xxx.c
@@ -1515,9 +1515,6 @@ static u32 ql_get_link_state(struct ql3_adapter *qdev)
1515 linkState = LS_UP; 1515 linkState = LS_UP;
1516 } else { 1516 } else {
1517 linkState = LS_DOWN; 1517 linkState = LS_DOWN;
1518 if (netif_msg_link(qdev))
1519 printk(KERN_WARNING PFX
1520 "%s: Link is down.\n", qdev->ndev->name);
1521 } 1518 }
1522 return linkState; 1519 return linkState;
1523} 1520}
@@ -1581,10 +1578,6 @@ static int ql_finish_auto_neg(struct ql3_adapter *qdev)
1581 ql_mac_enable(qdev, 1); 1578 ql_mac_enable(qdev, 1);
1582 } 1579 }
1583 1580
1584 if (netif_msg_link(qdev))
1585 printk(KERN_DEBUG PFX
1586 "%s: Change port_link_state LS_DOWN to LS_UP.\n",
1587 qdev->ndev->name);
1588 qdev->port_link_state = LS_UP; 1581 qdev->port_link_state = LS_UP;
1589 netif_start_queue(qdev->ndev); 1582 netif_start_queue(qdev->ndev);
1590 netif_carrier_on(qdev->ndev); 1583 netif_carrier_on(qdev->ndev);
@@ -1655,14 +1648,9 @@ static void ql_link_state_machine_work(struct work_struct *work)
1655 /* Fall Through */ 1648 /* Fall Through */
1656 1649
1657 case LS_DOWN: 1650 case LS_DOWN:
1658 if (netif_msg_link(qdev))
1659 printk(KERN_DEBUG PFX
1660 "%s: port_link_state = LS_DOWN.\n",
1661 qdev->ndev->name);
1662 if (curr_link_state == LS_UP) { 1651 if (curr_link_state == LS_UP) {
1663 if (netif_msg_link(qdev)) 1652 if (netif_msg_link(qdev))
1664 printk(KERN_DEBUG PFX 1653 printk(KERN_INFO PFX "%s: Link is up.\n",
1665 "%s: curr_link_state = LS_UP.\n",
1666 qdev->ndev->name); 1654 qdev->ndev->name);
1667 if (ql_is_auto_neg_complete(qdev)) 1655 if (ql_is_auto_neg_complete(qdev))
1668 ql_finish_auto_neg(qdev); 1656 ql_finish_auto_neg(qdev);
@@ -1670,6 +1658,7 @@ static void ql_link_state_machine_work(struct work_struct *work)
1670 if (qdev->port_link_state == LS_UP) 1658 if (qdev->port_link_state == LS_UP)
1671 ql_link_down_detect_clear(qdev); 1659 ql_link_down_detect_clear(qdev);
1672 1660
1661 qdev->port_link_state = LS_UP;
1673 } 1662 }
1674 break; 1663 break;
1675 1664
@@ -1678,12 +1667,14 @@ static void ql_link_state_machine_work(struct work_struct *work)
1678 * See if the link is currently down or went down and came 1667 * See if the link is currently down or went down and came
1679 * back up 1668 * back up
1680 */ 1669 */
1681 if ((curr_link_state == LS_DOWN) || ql_link_down_detect(qdev)) { 1670 if (curr_link_state == LS_DOWN) {
1682 if (netif_msg_link(qdev)) 1671 if (netif_msg_link(qdev))
1683 printk(KERN_INFO PFX "%s: Link is down.\n", 1672 printk(KERN_INFO PFX "%s: Link is down.\n",
1684 qdev->ndev->name); 1673 qdev->ndev->name);
1685 qdev->port_link_state = LS_DOWN; 1674 qdev->port_link_state = LS_DOWN;
1686 } 1675 }
1676 if (ql_link_down_detect(qdev))
1677 qdev->port_link_state = LS_DOWN;
1687 break; 1678 break;
1688 } 1679 }
1689 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); 1680 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
diff --git a/drivers/net/qlge/qlge.h b/drivers/net/qlge/qlge.h
index 38116f9d4163..ba2e1c5b6bcf 100644
--- a/drivers/net/qlge/qlge.h
+++ b/drivers/net/qlge/qlge.h
@@ -1375,7 +1375,6 @@ struct ql_adapter {
1375 spinlock_t adapter_lock; 1375 spinlock_t adapter_lock;
1376 spinlock_t hw_lock; 1376 spinlock_t hw_lock;
1377 spinlock_t stats_lock; 1377 spinlock_t stats_lock;
1378 spinlock_t legacy_lock; /* used for maintaining legacy intr sync */
1379 1378
1380 /* PCI Bus Relative Register Addresses */ 1379 /* PCI Bus Relative Register Addresses */
1381 void __iomem *reg_base; 1380 void __iomem *reg_base;
@@ -1399,8 +1398,6 @@ struct ql_adapter {
1399 struct msix_entry *msi_x_entry; 1398 struct msix_entry *msi_x_entry;
1400 struct intr_context intr_context[MAX_RX_RINGS]; 1399 struct intr_context intr_context[MAX_RX_RINGS];
1401 1400
1402 int (*legacy_check) (struct ql_adapter *);
1403
1404 int tx_ring_count; /* One per online CPU. */ 1401 int tx_ring_count; /* One per online CPU. */
1405 u32 rss_ring_first_cq_id;/* index of first inbound (rss) rx_ring */ 1402 u32 rss_ring_first_cq_id;/* index of first inbound (rss) rx_ring */
1406 u32 rss_ring_count; /* One per online CPU. */ 1403 u32 rss_ring_count; /* One per online CPU. */
@@ -1502,7 +1499,7 @@ void ql_mpi_work(struct work_struct *work);
1502void ql_mpi_reset_work(struct work_struct *work); 1499void ql_mpi_reset_work(struct work_struct *work);
1503int ql_wait_reg_rdy(struct ql_adapter *qdev, u32 reg, u32 bit, u32 ebit); 1500int ql_wait_reg_rdy(struct ql_adapter *qdev, u32 reg, u32 bit, u32 ebit);
1504void ql_queue_asic_error(struct ql_adapter *qdev); 1501void ql_queue_asic_error(struct ql_adapter *qdev);
1505void ql_enable_completion_interrupt(struct ql_adapter *qdev, u32 intr); 1502u32 ql_enable_completion_interrupt(struct ql_adapter *qdev, u32 intr);
1506void ql_set_ethtool_ops(struct net_device *ndev); 1503void ql_set_ethtool_ops(struct net_device *ndev);
1507int ql_read_xgmac_reg64(struct ql_adapter *qdev, u32 reg, u64 *data); 1504int ql_read_xgmac_reg64(struct ql_adapter *qdev, u32 reg, u64 *data);
1508 1505
diff --git a/drivers/net/qlge/qlge_main.c b/drivers/net/qlge/qlge_main.c
index 4b2caa6b7ac5..b83a9c9b6a97 100644
--- a/drivers/net/qlge/qlge_main.c
+++ b/drivers/net/qlge/qlge_main.c
@@ -577,41 +577,53 @@ static void ql_disable_interrupts(struct ql_adapter *qdev)
577 * incremented everytime we queue a worker and decremented everytime 577 * incremented everytime we queue a worker and decremented everytime
578 * a worker finishes. Once it hits zero we enable the interrupt. 578 * a worker finishes. Once it hits zero we enable the interrupt.
579 */ 579 */
580void ql_enable_completion_interrupt(struct ql_adapter *qdev, u32 intr) 580u32 ql_enable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
581{ 581{
582 if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) 582 u32 var = 0;
583 unsigned long hw_flags = 0;
584 struct intr_context *ctx = qdev->intr_context + intr;
585
586 if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr)) {
587 /* Always enable if we're MSIX multi interrupts and
588 * it's not the default (zeroeth) interrupt.
589 */
583 ql_write32(qdev, INTR_EN, 590 ql_write32(qdev, INTR_EN,
584 qdev->intr_context[intr].intr_en_mask); 591 ctx->intr_en_mask);
585 else { 592 var = ql_read32(qdev, STS);
586 if (qdev->legacy_check) 593 return var;
587 spin_lock(&qdev->legacy_lock);
588 if (atomic_dec_and_test(&qdev->intr_context[intr].irq_cnt)) {
589 QPRINTK(qdev, INTR, ERR, "Enabling interrupt %d.\n",
590 intr);
591 ql_write32(qdev, INTR_EN,
592 qdev->intr_context[intr].intr_en_mask);
593 } else {
594 QPRINTK(qdev, INTR, ERR,
595 "Skip enable, other queue(s) are active.\n");
596 }
597 if (qdev->legacy_check)
598 spin_unlock(&qdev->legacy_lock);
599 } 594 }
595
596 spin_lock_irqsave(&qdev->hw_lock, hw_flags);
597 if (atomic_dec_and_test(&ctx->irq_cnt)) {
598 ql_write32(qdev, INTR_EN,
599 ctx->intr_en_mask);
600 var = ql_read32(qdev, STS);
601 }
602 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
603 return var;
600} 604}
601 605
602static u32 ql_disable_completion_interrupt(struct ql_adapter *qdev, u32 intr) 606static u32 ql_disable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
603{ 607{
604 u32 var = 0; 608 u32 var = 0;
609 unsigned long hw_flags;
610 struct intr_context *ctx;
605 611
606 if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) 612 /* HW disables for us if we're MSIX multi interrupts and
607 goto exit; 613 * it's not the default (zeroeth) interrupt.
608 else if (!atomic_read(&qdev->intr_context[intr].irq_cnt)) { 614 */
615 if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr))
616 return 0;
617
618 ctx = qdev->intr_context + intr;
619 spin_lock_irqsave(&qdev->hw_lock, hw_flags);
620 if (!atomic_read(&ctx->irq_cnt)) {
609 ql_write32(qdev, INTR_EN, 621 ql_write32(qdev, INTR_EN,
610 qdev->intr_context[intr].intr_dis_mask); 622 ctx->intr_dis_mask);
611 var = ql_read32(qdev, STS); 623 var = ql_read32(qdev, STS);
612 } 624 }
613 atomic_inc(&qdev->intr_context[intr].irq_cnt); 625 atomic_inc(&ctx->irq_cnt);
614exit: 626 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
615 return var; 627 return var;
616} 628}
617 629
@@ -623,7 +635,9 @@ static void ql_enable_all_completion_interrupts(struct ql_adapter *qdev)
623 * and enables only if the result is zero. 635 * and enables only if the result is zero.
624 * So we precharge it here. 636 * So we precharge it here.
625 */ 637 */
626 atomic_set(&qdev->intr_context[i].irq_cnt, 1); 638 if (unlikely(!test_bit(QL_MSIX_ENABLED, &qdev->flags) ||
639 i == 0))
640 atomic_set(&qdev->intr_context[i].irq_cnt, 1);
627 ql_enable_completion_interrupt(qdev, i); 641 ql_enable_completion_interrupt(qdev, i);
628 } 642 }
629 643
@@ -1725,19 +1739,6 @@ static irqreturn_t qlge_msix_rx_isr(int irq, void *dev_id)
1725 return IRQ_HANDLED; 1739 return IRQ_HANDLED;
1726} 1740}
1727 1741
1728/* We check here to see if we're already handling a legacy
1729 * interrupt. If we are, then it must belong to another
1730 * chip with which we're sharing the interrupt line.
1731 */
1732int ql_legacy_check(struct ql_adapter *qdev)
1733{
1734 int err;
1735 spin_lock(&qdev->legacy_lock);
1736 err = atomic_read(&qdev->intr_context[0].irq_cnt);
1737 spin_unlock(&qdev->legacy_lock);
1738 return err;
1739}
1740
1741/* This handles a fatal error, MPI activity, and the default 1742/* This handles a fatal error, MPI activity, and the default
1742 * rx_ring in an MSI-X multiple vector environment. 1743 * rx_ring in an MSI-X multiple vector environment.
1743 * In MSI/Legacy environment it also process the rest of 1744 * In MSI/Legacy environment it also process the rest of
@@ -1752,12 +1753,15 @@ static irqreturn_t qlge_isr(int irq, void *dev_id)
1752 int i; 1753 int i;
1753 int work_done = 0; 1754 int work_done = 0;
1754 1755
1755 if (qdev->legacy_check && qdev->legacy_check(qdev)) { 1756 spin_lock(&qdev->hw_lock);
1756 QPRINTK(qdev, INTR, INFO, "Already busy, not our interrupt.\n"); 1757 if (atomic_read(&qdev->intr_context[0].irq_cnt)) {
1757 return IRQ_NONE; /* Not our interrupt */ 1758 QPRINTK(qdev, INTR, DEBUG, "Shared Interrupt, Not ours!\n");
1759 spin_unlock(&qdev->hw_lock);
1760 return IRQ_NONE;
1758 } 1761 }
1762 spin_unlock(&qdev->hw_lock);
1759 1763
1760 var = ql_read32(qdev, STS); 1764 var = ql_disable_completion_interrupt(qdev, intr_context->intr);
1761 1765
1762 /* 1766 /*
1763 * Check for fatal error. 1767 * Check for fatal error.
@@ -1823,6 +1827,7 @@ static irqreturn_t qlge_isr(int irq, void *dev_id)
1823 } 1827 }
1824 } 1828 }
1825 } 1829 }
1830 ql_enable_completion_interrupt(qdev, intr_context->intr);
1826 return work_done ? IRQ_HANDLED : IRQ_NONE; 1831 return work_done ? IRQ_HANDLED : IRQ_NONE;
1827} 1832}
1828 1833
@@ -2701,8 +2706,6 @@ msi:
2701 } 2706 }
2702 } 2707 }
2703 irq_type = LEG_IRQ; 2708 irq_type = LEG_IRQ;
2704 spin_lock_init(&qdev->legacy_lock);
2705 qdev->legacy_check = ql_legacy_check;
2706 QPRINTK(qdev, IFUP, DEBUG, "Running with legacy interrupts.\n"); 2709 QPRINTK(qdev, IFUP, DEBUG, "Running with legacy interrupts.\n");
2707} 2710}
2708 2711
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
index c821da21d8eb..4b7cb389dc49 100644
--- a/drivers/net/r8169.c
+++ b/drivers/net/r8169.c
@@ -81,6 +81,10 @@ static const int multicast_filter_limit = 32;
81#define RTL8169_TX_TIMEOUT (6*HZ) 81#define RTL8169_TX_TIMEOUT (6*HZ)
82#define RTL8169_PHY_TIMEOUT (10*HZ) 82#define RTL8169_PHY_TIMEOUT (10*HZ)
83 83
84#define RTL_EEPROM_SIG cpu_to_le32(0x8129)
85#define RTL_EEPROM_SIG_MASK cpu_to_le32(0xffff)
86#define RTL_EEPROM_SIG_ADDR 0x0000
87
84/* write/read MMIO register */ 88/* write/read MMIO register */
85#define RTL_W8(reg, val8) writeb ((val8), ioaddr + (reg)) 89#define RTL_W8(reg, val8) writeb ((val8), ioaddr + (reg))
86#define RTL_W16(reg, val16) writew ((val16), ioaddr + (reg)) 90#define RTL_W16(reg, val16) writew ((val16), ioaddr + (reg))
@@ -1911,74 +1915,6 @@ static void rtl_disable_msi(struct pci_dev *pdev, struct rtl8169_private *tp)
1911 } 1915 }
1912} 1916}
1913 1917
1914static int rtl_eeprom_read(struct pci_dev *pdev, int cap, int addr, __le32 *val)
1915{
1916 int ret, count = 100;
1917 u16 status = 0;
1918 u32 value;
1919
1920 ret = pci_write_config_word(pdev, cap + PCI_VPD_ADDR, addr);
1921 if (ret < 0)
1922 return ret;
1923
1924 do {
1925 udelay(10);
1926 ret = pci_read_config_word(pdev, cap + PCI_VPD_ADDR, &status);
1927 if (ret < 0)
1928 return ret;
1929 } while (!(status & PCI_VPD_ADDR_F) && --count);
1930
1931 if (!(status & PCI_VPD_ADDR_F))
1932 return -ETIMEDOUT;
1933
1934 ret = pci_read_config_dword(pdev, cap + PCI_VPD_DATA, &value);
1935 if (ret < 0)
1936 return ret;
1937
1938 *val = cpu_to_le32(value);
1939
1940 return 0;
1941}
1942
1943static void rtl_init_mac_address(struct rtl8169_private *tp,
1944 void __iomem *ioaddr)
1945{
1946 struct pci_dev *pdev = tp->pci_dev;
1947 u8 cfg1;
1948 int vpd_cap;
1949 u8 mac[8];
1950 DECLARE_MAC_BUF(buf);
1951
1952 cfg1 = RTL_R8(Config1);
1953 if (!(cfg1 & VPD)) {
1954 dprintk("VPD access not enabled, enabling\n");
1955 RTL_W8(Cfg9346, Cfg9346_Unlock);
1956 RTL_W8(Config1, cfg1 | VPD);
1957 RTL_W8(Cfg9346, Cfg9346_Lock);
1958 }
1959
1960 vpd_cap = pci_find_capability(pdev, PCI_CAP_ID_VPD);
1961 if (!vpd_cap)
1962 return;
1963
1964 /* MAC address is stored in EEPROM at offset 0x0e
1965 * Realtek says: "The VPD address does not have to be a DWORD-aligned
1966 * address as defined in the PCI 2.2 Specifications, but the VPD data
1967 * is always consecutive 4-byte data starting from the VPD address
1968 * specified."
1969 */
1970 if (rtl_eeprom_read(pdev, vpd_cap, 0x000e, (__le32*)&mac[0]) < 0 ||
1971 rtl_eeprom_read(pdev, vpd_cap, 0x0012, (__le32*)&mac[4]) < 0) {
1972 dprintk("Reading MAC address from EEPROM failed\n");
1973 return;
1974 }
1975
1976 dprintk("MAC address found in EEPROM: %s\n", print_mac(buf, mac));
1977
1978 /* Write MAC address */
1979 rtl_rar_set(tp, mac);
1980}
1981
1982static int __devinit 1918static int __devinit
1983rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) 1919rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
1984{ 1920{
@@ -2156,8 +2092,6 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
2156 2092
2157 tp->mmio_addr = ioaddr; 2093 tp->mmio_addr = ioaddr;
2158 2094
2159 rtl_init_mac_address(tp, ioaddr);
2160
2161 /* Get MAC address */ 2095 /* Get MAC address */
2162 for (i = 0; i < MAC_ADDR_LEN; i++) 2096 for (i = 0; i < MAC_ADDR_LEN; i++)
2163 dev->dev_addr[i] = RTL_R8(MAC0 + i); 2097 dev->dev_addr[i] = RTL_R8(MAC0 + i);
diff --git a/drivers/net/sfc/ethtool.c b/drivers/net/sfc/ethtool.c
index fa98af58223e..cd0d0873d978 100644
--- a/drivers/net/sfc/ethtool.c
+++ b/drivers/net/sfc/ethtool.c
@@ -174,8 +174,8 @@ static struct efx_ethtool_stat efx_ethtool_stats[] = {
174 174
175/* EEPROM range with gPXE configuration */ 175/* EEPROM range with gPXE configuration */
176#define EFX_ETHTOOL_EEPROM_MAGIC 0xEFAB 176#define EFX_ETHTOOL_EEPROM_MAGIC 0xEFAB
177#define EFX_ETHTOOL_EEPROM_MIN 0x100U 177#define EFX_ETHTOOL_EEPROM_MIN 0x800U
178#define EFX_ETHTOOL_EEPROM_MAX 0x400U 178#define EFX_ETHTOOL_EEPROM_MAX 0x1800U
179 179
180/************************************************************************** 180/**************************************************************************
181 * 181 *
diff --git a/drivers/net/sh_eth.c b/drivers/net/sh_eth.c
index a24bb68887ab..59f242a67714 100644
--- a/drivers/net/sh_eth.c
+++ b/drivers/net/sh_eth.c
@@ -927,7 +927,7 @@ static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
927 struct sh_eth_private *mdp = netdev_priv(ndev); 927 struct sh_eth_private *mdp = netdev_priv(ndev);
928 struct sh_eth_txdesc *txdesc; 928 struct sh_eth_txdesc *txdesc;
929 u32 entry; 929 u32 entry;
930 int flags; 930 unsigned long flags;
931 931
932 spin_lock_irqsave(&mdp->lock, flags); 932 spin_lock_irqsave(&mdp->lock, flags);
933 if ((mdp->cur_tx - mdp->dirty_tx) >= (TX_RING_SIZE - 4)) { 933 if ((mdp->cur_tx - mdp->dirty_tx) >= (TX_RING_SIZE - 4)) {
@@ -1141,7 +1141,7 @@ static int sh_mdio_init(struct net_device *ndev, int id)
1141 /* Hook up MII support for ethtool */ 1141 /* Hook up MII support for ethtool */
1142 mdp->mii_bus->name = "sh_mii"; 1142 mdp->mii_bus->name = "sh_mii";
1143 mdp->mii_bus->parent = &ndev->dev; 1143 mdp->mii_bus->parent = &ndev->dev;
1144 mdp->mii_bus->id[0] = id; 1144 snprintf(mdp->mii_bus->id, MII_BUS_ID_SIZE, "%x", id);
1145 1145
1146 /* PHY IRQ */ 1146 /* PHY IRQ */
1147 mdp->mii_bus->irq = kmalloc(sizeof(int)*PHY_MAX_ADDR, GFP_KERNEL); 1147 mdp->mii_bus->irq = kmalloc(sizeof(int)*PHY_MAX_ADDR, GFP_KERNEL);
diff --git a/drivers/net/sis190.c b/drivers/net/sis190.c
index 3fe01763760e..e6e3bf58a569 100644
--- a/drivers/net/sis190.c
+++ b/drivers/net/sis190.c
@@ -317,6 +317,7 @@ static struct mii_chip_info {
317 unsigned int type; 317 unsigned int type;
318 u32 feature; 318 u32 feature;
319} mii_chip_table[] = { 319} mii_chip_table[] = {
320 { "Atheros PHY AR8012", { 0x004d, 0xd020 }, LAN, 0 },
320 { "Broadcom PHY BCM5461", { 0x0020, 0x60c0 }, LAN, F_PHY_BCM5461 }, 321 { "Broadcom PHY BCM5461", { 0x0020, 0x60c0 }, LAN, F_PHY_BCM5461 },
321 { "Broadcom PHY AC131", { 0x0143, 0xbc70 }, LAN, 0 }, 322 { "Broadcom PHY AC131", { 0x0143, 0xbc70 }, LAN, 0 },
322 { "Agere PHY ET1101B", { 0x0282, 0xf010 }, LAN, 0 }, 323 { "Agere PHY ET1101B", { 0x0282, 0xf010 }, LAN, 0 },
diff --git a/drivers/net/sis900.c b/drivers/net/sis900.c
index fa3a460f8e2f..8e8337e8b072 100644
--- a/drivers/net/sis900.c
+++ b/drivers/net/sis900.c
@@ -1630,7 +1630,6 @@ sis900_start_xmit(struct sk_buff *skb, struct net_device *net_dev)
1630 * sis900_interrupt - sis900 interrupt handler 1630 * sis900_interrupt - sis900 interrupt handler
1631 * @irq: the irq number 1631 * @irq: the irq number
1632 * @dev_instance: the client data object 1632 * @dev_instance: the client data object
1633 * @regs: snapshot of processor context
1634 * 1633 *
1635 * The interrupt handler does all of the Rx thread work, 1634 * The interrupt handler does all of the Rx thread work,
1636 * and cleans up after the Tx thread 1635 * and cleans up after the Tx thread
diff --git a/drivers/net/smc911x.c b/drivers/net/smc911x.c
index 8aa7460ef0e3..9a16a79b67d0 100644
--- a/drivers/net/smc911x.c
+++ b/drivers/net/smc911x.c
@@ -155,23 +155,17 @@ static void PRINT_PKT(u_char *buf, int length)
155/* this enables an interrupt in the interrupt mask register */ 155/* this enables an interrupt in the interrupt mask register */
156#define SMC_ENABLE_INT(lp, x) do { \ 156#define SMC_ENABLE_INT(lp, x) do { \
157 unsigned int __mask; \ 157 unsigned int __mask; \
158 unsigned long __flags; \
159 spin_lock_irqsave(&lp->lock, __flags); \
160 __mask = SMC_GET_INT_EN((lp)); \ 158 __mask = SMC_GET_INT_EN((lp)); \
161 __mask |= (x); \ 159 __mask |= (x); \
162 SMC_SET_INT_EN((lp), __mask); \ 160 SMC_SET_INT_EN((lp), __mask); \
163 spin_unlock_irqrestore(&lp->lock, __flags); \
164} while (0) 161} while (0)
165 162
166/* this disables an interrupt from the interrupt mask register */ 163/* this disables an interrupt from the interrupt mask register */
167#define SMC_DISABLE_INT(lp, x) do { \ 164#define SMC_DISABLE_INT(lp, x) do { \
168 unsigned int __mask; \ 165 unsigned int __mask; \
169 unsigned long __flags; \
170 spin_lock_irqsave(&lp->lock, __flags); \
171 __mask = SMC_GET_INT_EN((lp)); \ 166 __mask = SMC_GET_INT_EN((lp)); \
172 __mask &= ~(x); \ 167 __mask &= ~(x); \
173 SMC_SET_INT_EN((lp), __mask); \ 168 SMC_SET_INT_EN((lp), __mask); \
174 spin_unlock_irqrestore(&lp->lock, __flags); \
175} while (0) 169} while (0)
176 170
177/* 171/*
@@ -180,7 +174,7 @@ static void PRINT_PKT(u_char *buf, int length)
180static void smc911x_reset(struct net_device *dev) 174static void smc911x_reset(struct net_device *dev)
181{ 175{
182 struct smc911x_local *lp = netdev_priv(dev); 176 struct smc911x_local *lp = netdev_priv(dev);
183 unsigned int reg, timeout=0, resets=1; 177 unsigned int reg, timeout=0, resets=1, irq_cfg;
184 unsigned long flags; 178 unsigned long flags;
185 179
186 DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __func__); 180 DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __func__);
@@ -252,7 +246,12 @@ static void smc911x_reset(struct net_device *dev)
252 * Deassert IRQ for 1*10us for edge type interrupts 246 * Deassert IRQ for 1*10us for edge type interrupts
253 * and drive IRQ pin push-pull 247 * and drive IRQ pin push-pull
254 */ 248 */
255 SMC_SET_IRQ_CFG(lp, (1 << 24) | INT_CFG_IRQ_EN_ | INT_CFG_IRQ_TYPE_); 249 irq_cfg = (1 << 24) | INT_CFG_IRQ_EN_ | INT_CFG_IRQ_TYPE_;
250#ifdef SMC_DYNAMIC_BUS_CONFIG
251 if (lp->cfg.irq_polarity)
252 irq_cfg |= INT_CFG_IRQ_POL_;
253#endif
254 SMC_SET_IRQ_CFG(lp, irq_cfg);
256 255
257 /* clear anything saved */ 256 /* clear anything saved */
258 if (lp->pending_tx_skb != NULL) { 257 if (lp->pending_tx_skb != NULL) {
@@ -274,6 +273,8 @@ static void smc911x_enable(struct net_device *dev)
274 273
275 DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __func__); 274 DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __func__);
276 275
276 spin_lock_irqsave(&lp->lock, flags);
277
277 SMC_SET_MAC_ADDR(lp, dev->dev_addr); 278 SMC_SET_MAC_ADDR(lp, dev->dev_addr);
278 279
279 /* Enable TX */ 280 /* Enable TX */
@@ -286,12 +287,10 @@ static void smc911x_enable(struct net_device *dev)
286 SMC_SET_FIFO_TSL(lp, 64); 287 SMC_SET_FIFO_TSL(lp, 64);
287 SMC_SET_GPT_CFG(lp, GPT_CFG_TIMER_EN_ | 10000); 288 SMC_SET_GPT_CFG(lp, GPT_CFG_TIMER_EN_ | 10000);
288 289
289 spin_lock_irqsave(&lp->lock, flags);
290 SMC_GET_MAC_CR(lp, cr); 290 SMC_GET_MAC_CR(lp, cr);
291 cr |= MAC_CR_TXEN_ | MAC_CR_HBDIS_; 291 cr |= MAC_CR_TXEN_ | MAC_CR_HBDIS_;
292 SMC_SET_MAC_CR(lp, cr); 292 SMC_SET_MAC_CR(lp, cr);
293 SMC_SET_TX_CFG(lp, TX_CFG_TX_ON_); 293 SMC_SET_TX_CFG(lp, TX_CFG_TX_ON_);
294 spin_unlock_irqrestore(&lp->lock, flags);
295 294
296 /* Add 2 byte padding to start of packets */ 295 /* Add 2 byte padding to start of packets */
297 SMC_SET_RX_CFG(lp, (2<<8) & RX_CFG_RXDOFF_); 296 SMC_SET_RX_CFG(lp, (2<<8) & RX_CFG_RXDOFF_);
@@ -300,9 +299,7 @@ static void smc911x_enable(struct net_device *dev)
300 if (cr & MAC_CR_RXEN_) 299 if (cr & MAC_CR_RXEN_)
301 DBG(SMC_DEBUG_RX, "%s: Receiver already enabled\n", dev->name); 300 DBG(SMC_DEBUG_RX, "%s: Receiver already enabled\n", dev->name);
302 301
303 spin_lock_irqsave(&lp->lock, flags);
304 SMC_SET_MAC_CR(lp, cr | MAC_CR_RXEN_); 302 SMC_SET_MAC_CR(lp, cr | MAC_CR_RXEN_);
305 spin_unlock_irqrestore(&lp->lock, flags);
306 303
307 /* Interrupt on every received packet */ 304 /* Interrupt on every received packet */
308 SMC_SET_FIFO_RSA(lp, 0x01); 305 SMC_SET_FIFO_RSA(lp, 0x01);
@@ -318,6 +315,8 @@ static void smc911x_enable(struct net_device *dev)
318 mask|=INT_EN_RDFO_EN_; 315 mask|=INT_EN_RDFO_EN_;
319 } 316 }
320 SMC_ENABLE_INT(lp, mask); 317 SMC_ENABLE_INT(lp, mask);
318
319 spin_unlock_irqrestore(&lp->lock, flags);
321} 320}
322 321
323/* 322/*
@@ -458,7 +457,6 @@ static void smc911x_hardware_send_pkt(struct net_device *dev)
458 struct sk_buff *skb; 457 struct sk_buff *skb;
459 unsigned int cmdA, cmdB, len; 458 unsigned int cmdA, cmdB, len;
460 unsigned char *buf; 459 unsigned char *buf;
461 unsigned long flags;
462 460
463 DBG(SMC_DEBUG_FUNC | SMC_DEBUG_TX, "%s: --> %s\n", dev->name, __func__); 461 DBG(SMC_DEBUG_FUNC | SMC_DEBUG_TX, "%s: --> %s\n", dev->name, __func__);
464 BUG_ON(lp->pending_tx_skb == NULL); 462 BUG_ON(lp->pending_tx_skb == NULL);
@@ -501,13 +499,11 @@ static void smc911x_hardware_send_pkt(struct net_device *dev)
501#else 499#else
502 SMC_PUSH_DATA(lp, buf, len); 500 SMC_PUSH_DATA(lp, buf, len);
503 dev->trans_start = jiffies; 501 dev->trans_start = jiffies;
504 dev_kfree_skb(skb); 502 dev_kfree_skb_irq(skb);
505#endif 503#endif
506 spin_lock_irqsave(&lp->lock, flags);
507 if (!lp->tx_throttle) { 504 if (!lp->tx_throttle) {
508 netif_wake_queue(dev); 505 netif_wake_queue(dev);
509 } 506 }
510 spin_unlock_irqrestore(&lp->lock, flags);
511 SMC_ENABLE_INT(lp, INT_EN_TDFA_EN_ | INT_EN_TSFL_EN_); 507 SMC_ENABLE_INT(lp, INT_EN_TDFA_EN_ | INT_EN_TSFL_EN_);
512} 508}
513 509
@@ -526,6 +522,8 @@ static int smc911x_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
526 DBG(SMC_DEBUG_FUNC | SMC_DEBUG_TX, "%s: --> %s\n", 522 DBG(SMC_DEBUG_FUNC | SMC_DEBUG_TX, "%s: --> %s\n",
527 dev->name, __func__); 523 dev->name, __func__);
528 524
525 spin_lock_irqsave(&lp->lock, flags);
526
529 BUG_ON(lp->pending_tx_skb != NULL); 527 BUG_ON(lp->pending_tx_skb != NULL);
530 528
531 free = SMC_GET_TX_FIFO_INF(lp) & TX_FIFO_INF_TDFREE_; 529 free = SMC_GET_TX_FIFO_INF(lp) & TX_FIFO_INF_TDFREE_;
@@ -535,12 +533,10 @@ static int smc911x_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
535 if (free <= SMC911X_TX_FIFO_LOW_THRESHOLD) { 533 if (free <= SMC911X_TX_FIFO_LOW_THRESHOLD) {
536 DBG(SMC_DEBUG_TX, "%s: Disabling data flow due to low FIFO space (%d)\n", 534 DBG(SMC_DEBUG_TX, "%s: Disabling data flow due to low FIFO space (%d)\n",
537 dev->name, free); 535 dev->name, free);
538 spin_lock_irqsave(&lp->lock, flags);
539 /* Reenable when at least 1 packet of size MTU present */ 536 /* Reenable when at least 1 packet of size MTU present */
540 SMC_SET_FIFO_TDA(lp, (SMC911X_TX_FIFO_LOW_THRESHOLD)/64); 537 SMC_SET_FIFO_TDA(lp, (SMC911X_TX_FIFO_LOW_THRESHOLD)/64);
541 lp->tx_throttle = 1; 538 lp->tx_throttle = 1;
542 netif_stop_queue(dev); 539 netif_stop_queue(dev);
543 spin_unlock_irqrestore(&lp->lock, flags);
544 } 540 }
545 541
546 /* Drop packets when we run out of space in TX FIFO 542 /* Drop packets when we run out of space in TX FIFO
@@ -556,6 +552,7 @@ static int smc911x_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
556 lp->pending_tx_skb = NULL; 552 lp->pending_tx_skb = NULL;
557 dev->stats.tx_errors++; 553 dev->stats.tx_errors++;
558 dev->stats.tx_dropped++; 554 dev->stats.tx_dropped++;
555 spin_unlock_irqrestore(&lp->lock, flags);
559 dev_kfree_skb(skb); 556 dev_kfree_skb(skb);
560 return 0; 557 return 0;
561 } 558 }
@@ -565,7 +562,6 @@ static int smc911x_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
565 /* If the DMA is already running then defer this packet Tx until 562 /* If the DMA is already running then defer this packet Tx until
566 * the DMA IRQ starts it 563 * the DMA IRQ starts it
567 */ 564 */
568 spin_lock_irqsave(&lp->lock, flags);
569 if (lp->txdma_active) { 565 if (lp->txdma_active) {
570 DBG(SMC_DEBUG_TX | SMC_DEBUG_DMA, "%s: Tx DMA running, deferring packet\n", dev->name); 566 DBG(SMC_DEBUG_TX | SMC_DEBUG_DMA, "%s: Tx DMA running, deferring packet\n", dev->name);
571 lp->pending_tx_skb = skb; 567 lp->pending_tx_skb = skb;
@@ -576,11 +572,11 @@ static int smc911x_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
576 DBG(SMC_DEBUG_TX | SMC_DEBUG_DMA, "%s: Activating Tx DMA\n", dev->name); 572 DBG(SMC_DEBUG_TX | SMC_DEBUG_DMA, "%s: Activating Tx DMA\n", dev->name);
577 lp->txdma_active = 1; 573 lp->txdma_active = 1;
578 } 574 }
579 spin_unlock_irqrestore(&lp->lock, flags);
580 } 575 }
581#endif 576#endif
582 lp->pending_tx_skb = skb; 577 lp->pending_tx_skb = skb;
583 smc911x_hardware_send_pkt(dev); 578 smc911x_hardware_send_pkt(dev);
579 spin_unlock_irqrestore(&lp->lock, flags);
584 580
585 return 0; 581 return 0;
586} 582}
@@ -1242,7 +1238,7 @@ smc911x_rx_dma_irq(int dma, void *data)
1242 netif_rx(skb); 1238 netif_rx(skb);
1243 1239
1244 spin_lock_irqsave(&lp->lock, flags); 1240 spin_lock_irqsave(&lp->lock, flags);
1245 pkts = (SMC_GET_RX_FIFO_INF() & RX_FIFO_INF_RXSUSED_) >> 16; 1241 pkts = (SMC_GET_RX_FIFO_INF(lp) & RX_FIFO_INF_RXSUSED_) >> 16;
1246 if (pkts != 0) { 1242 if (pkts != 0) {
1247 smc911x_rcv(dev); 1243 smc911x_rcv(dev);
1248 }else { 1244 }else {
@@ -1739,7 +1735,7 @@ static const struct ethtool_ops smc911x_ethtool_ops = {
1739 * This routine has a simple purpose -- make the SMC chip generate an 1735 * This routine has a simple purpose -- make the SMC chip generate an
1740 * interrupt, so an auto-detect routine can detect it, and find the IRQ, 1736 * interrupt, so an auto-detect routine can detect it, and find the IRQ,
1741 */ 1737 */
1742static int __init smc911x_findirq(struct net_device *dev) 1738static int __devinit smc911x_findirq(struct net_device *dev)
1743{ 1739{
1744 struct smc911x_local *lp = netdev_priv(dev); 1740 struct smc911x_local *lp = netdev_priv(dev);
1745 int timeout = 20; 1741 int timeout = 20;
@@ -1803,7 +1799,7 @@ static int __init smc911x_findirq(struct net_device *dev)
1803 * o actually GRAB the irq. 1799 * o actually GRAB the irq.
1804 * o GRAB the region 1800 * o GRAB the region
1805 */ 1801 */
1806static int __init smc911x_probe(struct net_device *dev) 1802static int __devinit smc911x_probe(struct net_device *dev)
1807{ 1803{
1808 struct smc911x_local *lp = netdev_priv(dev); 1804 struct smc911x_local *lp = netdev_priv(dev);
1809 int i, retval; 1805 int i, retval;
@@ -1817,7 +1813,7 @@ static int __init smc911x_probe(struct net_device *dev)
1817 val = SMC_GET_BYTE_TEST(lp); 1813 val = SMC_GET_BYTE_TEST(lp);
1818 DBG(SMC_DEBUG_MISC, "%s: endian probe returned 0x%04x\n", CARDNAME, val); 1814 DBG(SMC_DEBUG_MISC, "%s: endian probe returned 0x%04x\n", CARDNAME, val);
1819 if (val != 0x87654321) { 1815 if (val != 0x87654321) {
1820 printk(KERN_ERR "Invalid chip endian 0x08%x\n",val); 1816 printk(KERN_ERR "Invalid chip endian 0x%08x\n",val);
1821 retval = -ENODEV; 1817 retval = -ENODEV;
1822 goto err_out; 1818 goto err_out;
1823 } 1819 }
@@ -2052,9 +2048,11 @@ err_out:
2052 * 0 --> there is a device 2048 * 0 --> there is a device
2053 * anything else, error 2049 * anything else, error
2054 */ 2050 */
2055static int smc911x_drv_probe(struct platform_device *pdev) 2051static int __devinit smc911x_drv_probe(struct platform_device *pdev)
2056{ 2052{
2057 struct smc91x_platdata *pd = pdev->dev.platform_data; 2053#ifdef SMC_DYNAMIC_BUS_CONFIG
2054 struct smc911x_platdata *pd = pdev->dev.platform_data;
2055#endif
2058 struct net_device *ndev; 2056 struct net_device *ndev;
2059 struct resource *res; 2057 struct resource *res;
2060 struct smc911x_local *lp; 2058 struct smc911x_local *lp;
@@ -2126,7 +2124,7 @@ out:
2126 return ret; 2124 return ret;
2127} 2125}
2128 2126
2129static int smc911x_drv_remove(struct platform_device *pdev) 2127static int __devexit smc911x_drv_remove(struct platform_device *pdev)
2130{ 2128{
2131 struct net_device *ndev = platform_get_drvdata(pdev); 2129 struct net_device *ndev = platform_get_drvdata(pdev);
2132 struct smc911x_local *lp = netdev_priv(ndev); 2130 struct smc911x_local *lp = netdev_priv(ndev);
@@ -2186,9 +2184,9 @@ static int smc911x_drv_resume(struct platform_device *dev)
2186 2184
2187 if (netif_running(ndev)) { 2185 if (netif_running(ndev)) {
2188 smc911x_reset(ndev); 2186 smc911x_reset(ndev);
2189 smc911x_enable(ndev);
2190 if (lp->phy_type != 0) 2187 if (lp->phy_type != 0)
2191 smc911x_phy_configure(&lp->phy_configure); 2188 smc911x_phy_configure(&lp->phy_configure);
2189 smc911x_enable(ndev);
2192 netif_device_attach(ndev); 2190 netif_device_attach(ndev);
2193 } 2191 }
2194 } 2192 }
@@ -2197,7 +2195,7 @@ static int smc911x_drv_resume(struct platform_device *dev)
2197 2195
2198static struct platform_driver smc911x_driver = { 2196static struct platform_driver smc911x_driver = {
2199 .probe = smc911x_drv_probe, 2197 .probe = smc911x_drv_probe,
2200 .remove = smc911x_drv_remove, 2198 .remove = __devexit_p(smc911x_drv_remove),
2201 .suspend = smc911x_drv_suspend, 2199 .suspend = smc911x_drv_suspend,
2202 .resume = smc911x_drv_resume, 2200 .resume = smc911x_drv_resume,
2203 .driver = { 2201 .driver = {
diff --git a/drivers/net/smc911x.h b/drivers/net/smc911x.h
index bf6240f23f5d..cc7d85bdfb3e 100644
--- a/drivers/net/smc911x.h
+++ b/drivers/net/smc911x.h
@@ -50,6 +50,10 @@
50#define SMC_DYNAMIC_BUS_CONFIG 50#define SMC_DYNAMIC_BUS_CONFIG
51#endif 51#endif
52 52
53#ifdef SMC_USE_PXA_DMA
54#define SMC_USE_DMA
55#endif
56
53/* store this information for the driver.. */ 57/* store this information for the driver.. */
54struct smc911x_local { 58struct smc911x_local {
55 /* 59 /*
@@ -196,8 +200,6 @@ static inline void SMC_outsl(struct smc911x_local *lp, int reg,
196 200
197 201
198#ifdef SMC_USE_PXA_DMA 202#ifdef SMC_USE_PXA_DMA
199#define SMC_USE_DMA
200
201/* 203/*
202 * Define the request and free functions 204 * Define the request and free functions
203 * These are unfortunately architecture specific as no generic allocation 205 * These are unfortunately architecture specific as no generic allocation
diff --git a/drivers/net/smc91x.c b/drivers/net/smc91x.c
index c70870e0fd61..35c56abf4113 100644
--- a/drivers/net/smc91x.c
+++ b/drivers/net/smc91x.c
@@ -1696,7 +1696,7 @@ static const struct ethtool_ops smc_ethtool_ops = {
1696 * I just deleted auto_irq.c, since it was never built... 1696 * I just deleted auto_irq.c, since it was never built...
1697 * --jgarzik 1697 * --jgarzik
1698 */ 1698 */
1699static int __init smc_findirq(struct smc_local *lp) 1699static int __devinit smc_findirq(struct smc_local *lp)
1700{ 1700{
1701 void __iomem *ioaddr = lp->base; 1701 void __iomem *ioaddr = lp->base;
1702 int timeout = 20; 1702 int timeout = 20;
@@ -1770,7 +1770,7 @@ static int __init smc_findirq(struct smc_local *lp)
1770 * o actually GRAB the irq. 1770 * o actually GRAB the irq.
1771 * o GRAB the region 1771 * o GRAB the region
1772 */ 1772 */
1773static int __init smc_probe(struct net_device *dev, void __iomem *ioaddr, 1773static int __devinit smc_probe(struct net_device *dev, void __iomem *ioaddr,
1774 unsigned long irq_flags) 1774 unsigned long irq_flags)
1775{ 1775{
1776 struct smc_local *lp = netdev_priv(dev); 1776 struct smc_local *lp = netdev_priv(dev);
@@ -2060,7 +2060,7 @@ static int smc_request_attrib(struct platform_device *pdev,
2060 struct net_device *ndev) 2060 struct net_device *ndev)
2061{ 2061{
2062 struct resource * res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "smc91x-attrib"); 2062 struct resource * res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "smc91x-attrib");
2063 struct smc_local *lp = netdev_priv(ndev); 2063 struct smc_local *lp __maybe_unused = netdev_priv(ndev);
2064 2064
2065 if (!res) 2065 if (!res)
2066 return 0; 2066 return 0;
@@ -2075,7 +2075,7 @@ static void smc_release_attrib(struct platform_device *pdev,
2075 struct net_device *ndev) 2075 struct net_device *ndev)
2076{ 2076{
2077 struct resource * res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "smc91x-attrib"); 2077 struct resource * res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "smc91x-attrib");
2078 struct smc_local *lp = netdev_priv(ndev); 2078 struct smc_local *lp __maybe_unused = netdev_priv(ndev);
2079 2079
2080 if (res) 2080 if (res)
2081 release_mem_region(res->start, ATTRIB_SIZE); 2081 release_mem_region(res->start, ATTRIB_SIZE);
@@ -2126,7 +2126,7 @@ static void smc_release_datacs(struct platform_device *pdev, struct net_device *
2126 * 0 --> there is a device 2126 * 0 --> there is a device
2127 * anything else, error 2127 * anything else, error
2128 */ 2128 */
2129static int smc_drv_probe(struct platform_device *pdev) 2129static int __devinit smc_drv_probe(struct platform_device *pdev)
2130{ 2130{
2131 struct smc91x_platdata *pd = pdev->dev.platform_data; 2131 struct smc91x_platdata *pd = pdev->dev.platform_data;
2132 struct smc_local *lp; 2132 struct smc_local *lp;
@@ -2240,7 +2240,7 @@ static int smc_drv_probe(struct platform_device *pdev)
2240 return ret; 2240 return ret;
2241} 2241}
2242 2242
2243static int smc_drv_remove(struct platform_device *pdev) 2243static int __devexit smc_drv_remove(struct platform_device *pdev)
2244{ 2244{
2245 struct net_device *ndev = platform_get_drvdata(pdev); 2245 struct net_device *ndev = platform_get_drvdata(pdev);
2246 struct smc_local *lp = netdev_priv(ndev); 2246 struct smc_local *lp = netdev_priv(ndev);
@@ -2305,7 +2305,7 @@ static int smc_drv_resume(struct platform_device *dev)
2305 2305
2306static struct platform_driver smc_driver = { 2306static struct platform_driver smc_driver = {
2307 .probe = smc_drv_probe, 2307 .probe = smc_drv_probe,
2308 .remove = smc_drv_remove, 2308 .remove = __devexit_p(smc_drv_remove),
2309 .suspend = smc_drv_suspend, 2309 .suspend = smc_drv_suspend,
2310 .resume = smc_drv_resume, 2310 .resume = smc_drv_resume,
2311 .driver = { 2311 .driver = {
diff --git a/drivers/net/spider_net.c b/drivers/net/spider_net.c
index b6435d0d71f9..07599b492359 100644
--- a/drivers/net/spider_net.c
+++ b/drivers/net/spider_net.c
@@ -672,7 +672,6 @@ write_hash:
672/** 672/**
673 * spider_net_prepare_tx_descr - fill tx descriptor with skb data 673 * spider_net_prepare_tx_descr - fill tx descriptor with skb data
674 * @card: card structure 674 * @card: card structure
675 * @descr: descriptor structure to fill out
676 * @skb: packet to use 675 * @skb: packet to use
677 * 676 *
678 * returns 0 on success, <0 on failure. 677 * returns 0 on success, <0 on failure.
@@ -867,7 +866,6 @@ spider_net_release_tx_chain(struct spider_net_card *card, int brutal)
867/** 866/**
868 * spider_net_kick_tx_dma - enables TX DMA processing 867 * spider_net_kick_tx_dma - enables TX DMA processing
869 * @card: card structure 868 * @card: card structure
870 * @descr: descriptor address to enable TX processing at
871 * 869 *
872 * This routine will start the transmit DMA running if 870 * This routine will start the transmit DMA running if
873 * it is not already running. This routine ned only be 871 * it is not already running. This routine ned only be
@@ -1637,7 +1635,6 @@ spider_net_handle_error_irq(struct spider_net_card *card, u32 status_reg,
1637 * spider_net_interrupt - interrupt handler for spider_net 1635 * spider_net_interrupt - interrupt handler for spider_net
1638 * @irq: interrupt number 1636 * @irq: interrupt number
1639 * @ptr: pointer to net_device 1637 * @ptr: pointer to net_device
1640 * @regs: PU registers
1641 * 1638 *
1642 * returns IRQ_HANDLED, if interrupt was for driver, or IRQ_NONE, if no 1639 * returns IRQ_HANDLED, if interrupt was for driver, or IRQ_NONE, if no
1643 * interrupt found raised by card. 1640 * interrupt found raised by card.
@@ -2419,7 +2416,6 @@ spider_net_undo_pci_setup(struct spider_net_card *card)
2419 2416
2420/** 2417/**
2421 * spider_net_setup_pci_dev - sets up the device in terms of PCI operations 2418 * spider_net_setup_pci_dev - sets up the device in terms of PCI operations
2422 * @card: card structure
2423 * @pdev: PCI device 2419 * @pdev: PCI device
2424 * 2420 *
2425 * Returns the card structure or NULL if any errors occur 2421 * Returns the card structure or NULL if any errors occur
diff --git a/drivers/net/starfire.c b/drivers/net/starfire.c
index 1d2ef8f47780..5a40f2d78beb 100644
--- a/drivers/net/starfire.c
+++ b/drivers/net/starfire.c
@@ -1509,6 +1509,11 @@ static int __netdev_rx(struct net_device *dev, int *quota)
1509 desc->status = 0; 1509 desc->status = 0;
1510 np->rx_done = (np->rx_done + 1) % DONE_Q_SIZE; 1510 np->rx_done = (np->rx_done + 1) % DONE_Q_SIZE;
1511 } 1511 }
1512
1513 if (*quota == 0) { /* out of rx quota */
1514 retcode = 1;
1515 goto out;
1516 }
1512 writew(np->rx_done, np->base + CompletionQConsumerIdx); 1517 writew(np->rx_done, np->base + CompletionQConsumerIdx);
1513 1518
1514 out: 1519 out:
diff --git a/drivers/net/sungem.c b/drivers/net/sungem.c
index 4291458955ef..fed7eba65ead 100644
--- a/drivers/net/sungem.c
+++ b/drivers/net/sungem.c
@@ -1142,6 +1142,70 @@ static int gem_start_xmit(struct sk_buff *skb, struct net_device *dev)
1142 return NETDEV_TX_OK; 1142 return NETDEV_TX_OK;
1143} 1143}
1144 1144
1145static void gem_pcs_reset(struct gem *gp)
1146{
1147 int limit;
1148 u32 val;
1149
1150 /* Reset PCS unit. */
1151 val = readl(gp->regs + PCS_MIICTRL);
1152 val |= PCS_MIICTRL_RST;
1153 writel(val, gp->regs + PCS_MIICTRL);
1154
1155 limit = 32;
1156 while (readl(gp->regs + PCS_MIICTRL) & PCS_MIICTRL_RST) {
1157 udelay(100);
1158 if (limit-- <= 0)
1159 break;
1160 }
1161 if (limit <= 0)
1162 printk(KERN_WARNING "%s: PCS reset bit would not clear.\n",
1163 gp->dev->name);
1164}
1165
1166static void gem_pcs_reinit_adv(struct gem *gp)
1167{
1168 u32 val;
1169
1170 /* Make sure PCS is disabled while changing advertisement
1171 * configuration.
1172 */
1173 val = readl(gp->regs + PCS_CFG);
1174 val &= ~(PCS_CFG_ENABLE | PCS_CFG_TO);
1175 writel(val, gp->regs + PCS_CFG);
1176
1177 /* Advertise all capabilities except assymetric
1178 * pause.
1179 */
1180 val = readl(gp->regs + PCS_MIIADV);
1181 val |= (PCS_MIIADV_FD | PCS_MIIADV_HD |
1182 PCS_MIIADV_SP | PCS_MIIADV_AP);
1183 writel(val, gp->regs + PCS_MIIADV);
1184
1185 /* Enable and restart auto-negotiation, disable wrapback/loopback,
1186 * and re-enable PCS.
1187 */
1188 val = readl(gp->regs + PCS_MIICTRL);
1189 val |= (PCS_MIICTRL_RAN | PCS_MIICTRL_ANE);
1190 val &= ~PCS_MIICTRL_WB;
1191 writel(val, gp->regs + PCS_MIICTRL);
1192
1193 val = readl(gp->regs + PCS_CFG);
1194 val |= PCS_CFG_ENABLE;
1195 writel(val, gp->regs + PCS_CFG);
1196
1197 /* Make sure serialink loopback is off. The meaning
1198 * of this bit is logically inverted based upon whether
1199 * you are in Serialink or SERDES mode.
1200 */
1201 val = readl(gp->regs + PCS_SCTRL);
1202 if (gp->phy_type == phy_serialink)
1203 val &= ~PCS_SCTRL_LOOP;
1204 else
1205 val |= PCS_SCTRL_LOOP;
1206 writel(val, gp->regs + PCS_SCTRL);
1207}
1208
1145#define STOP_TRIES 32 1209#define STOP_TRIES 32
1146 1210
1147/* Must be invoked under gp->lock and gp->tx_lock. */ 1211/* Must be invoked under gp->lock and gp->tx_lock. */
@@ -1168,6 +1232,9 @@ static void gem_reset(struct gem *gp)
1168 1232
1169 if (limit <= 0) 1233 if (limit <= 0)
1170 printk(KERN_ERR "%s: SW reset is ghetto.\n", gp->dev->name); 1234 printk(KERN_ERR "%s: SW reset is ghetto.\n", gp->dev->name);
1235
1236 if (gp->phy_type == phy_serialink || gp->phy_type == phy_serdes)
1237 gem_pcs_reinit_adv(gp);
1171} 1238}
1172 1239
1173/* Must be invoked under gp->lock and gp->tx_lock. */ 1240/* Must be invoked under gp->lock and gp->tx_lock. */
@@ -1324,7 +1391,7 @@ static int gem_set_link_modes(struct gem *gp)
1324 gp->phy_type == phy_serdes) { 1391 gp->phy_type == phy_serdes) {
1325 u32 pcs_lpa = readl(gp->regs + PCS_MIILP); 1392 u32 pcs_lpa = readl(gp->regs + PCS_MIILP);
1326 1393
1327 if (pcs_lpa & PCS_MIIADV_FD) 1394 if ((pcs_lpa & PCS_MIIADV_FD) || gp->phy_type == phy_serdes)
1328 full_duplex = 1; 1395 full_duplex = 1;
1329 speed = SPEED_1000; 1396 speed = SPEED_1000;
1330 } 1397 }
@@ -1488,6 +1555,9 @@ static void gem_link_timer(unsigned long data)
1488 val = readl(gp->regs + PCS_MIISTAT); 1555 val = readl(gp->regs + PCS_MIISTAT);
1489 1556
1490 if ((val & PCS_MIISTAT_LS) != 0) { 1557 if ((val & PCS_MIISTAT_LS) != 0) {
1558 if (gp->lstate == link_up)
1559 goto restart;
1560
1491 gp->lstate = link_up; 1561 gp->lstate = link_up;
1492 netif_carrier_on(gp->dev); 1562 netif_carrier_on(gp->dev);
1493 (void)gem_set_link_modes(gp); 1563 (void)gem_set_link_modes(gp);
@@ -1708,61 +1778,8 @@ static void gem_init_phy(struct gem *gp)
1708 if (gp->phy_mii.def && gp->phy_mii.def->ops->init) 1778 if (gp->phy_mii.def && gp->phy_mii.def->ops->init)
1709 gp->phy_mii.def->ops->init(&gp->phy_mii); 1779 gp->phy_mii.def->ops->init(&gp->phy_mii);
1710 } else { 1780 } else {
1711 u32 val; 1781 gem_pcs_reset(gp);
1712 int limit; 1782 gem_pcs_reinit_adv(gp);
1713
1714 /* Reset PCS unit. */
1715 val = readl(gp->regs + PCS_MIICTRL);
1716 val |= PCS_MIICTRL_RST;
1717 writeb(val, gp->regs + PCS_MIICTRL);
1718
1719 limit = 32;
1720 while (readl(gp->regs + PCS_MIICTRL) & PCS_MIICTRL_RST) {
1721 udelay(100);
1722 if (limit-- <= 0)
1723 break;
1724 }
1725 if (limit <= 0)
1726 printk(KERN_WARNING "%s: PCS reset bit would not clear.\n",
1727 gp->dev->name);
1728
1729 /* Make sure PCS is disabled while changing advertisement
1730 * configuration.
1731 */
1732 val = readl(gp->regs + PCS_CFG);
1733 val &= ~(PCS_CFG_ENABLE | PCS_CFG_TO);
1734 writel(val, gp->regs + PCS_CFG);
1735
1736 /* Advertise all capabilities except assymetric
1737 * pause.
1738 */
1739 val = readl(gp->regs + PCS_MIIADV);
1740 val |= (PCS_MIIADV_FD | PCS_MIIADV_HD |
1741 PCS_MIIADV_SP | PCS_MIIADV_AP);
1742 writel(val, gp->regs + PCS_MIIADV);
1743
1744 /* Enable and restart auto-negotiation, disable wrapback/loopback,
1745 * and re-enable PCS.
1746 */
1747 val = readl(gp->regs + PCS_MIICTRL);
1748 val |= (PCS_MIICTRL_RAN | PCS_MIICTRL_ANE);
1749 val &= ~PCS_MIICTRL_WB;
1750 writel(val, gp->regs + PCS_MIICTRL);
1751
1752 val = readl(gp->regs + PCS_CFG);
1753 val |= PCS_CFG_ENABLE;
1754 writel(val, gp->regs + PCS_CFG);
1755
1756 /* Make sure serialink loopback is off. The meaning
1757 * of this bit is logically inverted based upon whether
1758 * you are in Serialink or SERDES mode.
1759 */
1760 val = readl(gp->regs + PCS_SCTRL);
1761 if (gp->phy_type == phy_serialink)
1762 val &= ~PCS_SCTRL_LOOP;
1763 else
1764 val |= PCS_SCTRL_LOOP;
1765 writel(val, gp->regs + PCS_SCTRL);
1766 } 1783 }
1767 1784
1768 /* Default aneg parameters */ 1785 /* Default aneg parameters */
@@ -2680,6 +2697,21 @@ static int gem_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
2680 cmd->speed = 0; 2697 cmd->speed = 0;
2681 cmd->duplex = cmd->port = cmd->phy_address = 2698 cmd->duplex = cmd->port = cmd->phy_address =
2682 cmd->transceiver = cmd->autoneg = 0; 2699 cmd->transceiver = cmd->autoneg = 0;
2700
2701 /* serdes means usually a Fibre connector, with most fixed */
2702 if (gp->phy_type == phy_serdes) {
2703 cmd->port = PORT_FIBRE;
2704 cmd->supported = (SUPPORTED_1000baseT_Half |
2705 SUPPORTED_1000baseT_Full |
2706 SUPPORTED_FIBRE | SUPPORTED_Autoneg |
2707 SUPPORTED_Pause | SUPPORTED_Asym_Pause);
2708 cmd->advertising = cmd->supported;
2709 cmd->transceiver = XCVR_INTERNAL;
2710 if (gp->lstate == link_up)
2711 cmd->speed = SPEED_1000;
2712 cmd->duplex = DUPLEX_FULL;
2713 cmd->autoneg = 1;
2714 }
2683 } 2715 }
2684 cmd->maxtxpkt = cmd->maxrxpkt = 0; 2716 cmd->maxtxpkt = cmd->maxrxpkt = 0;
2685 2717
diff --git a/drivers/net/tlan.c b/drivers/net/tlan.c
index c41d68761364..e60498232b94 100644
--- a/drivers/net/tlan.c
+++ b/drivers/net/tlan.c
@@ -1098,6 +1098,7 @@ static int TLan_StartTx( struct sk_buff *skb, struct net_device *dev )
1098 dma_addr_t tail_list_phys; 1098 dma_addr_t tail_list_phys;
1099 u8 *tail_buffer; 1099 u8 *tail_buffer;
1100 unsigned long flags; 1100 unsigned long flags;
1101 unsigned int txlen;
1101 1102
1102 if ( ! priv->phyOnline ) { 1103 if ( ! priv->phyOnline ) {
1103 TLAN_DBG( TLAN_DEBUG_TX, "TRANSMIT: %s PHY is not ready\n", 1104 TLAN_DBG( TLAN_DEBUG_TX, "TRANSMIT: %s PHY is not ready\n",
@@ -1108,6 +1109,7 @@ static int TLan_StartTx( struct sk_buff *skb, struct net_device *dev )
1108 1109
1109 if (skb_padto(skb, TLAN_MIN_FRAME_SIZE)) 1110 if (skb_padto(skb, TLAN_MIN_FRAME_SIZE))
1110 return 0; 1111 return 0;
1112 txlen = max(skb->len, (unsigned int)TLAN_MIN_FRAME_SIZE);
1111 1113
1112 tail_list = priv->txList + priv->txTail; 1114 tail_list = priv->txList + priv->txTail;
1113 tail_list_phys = priv->txListDMA + sizeof(TLanList) * priv->txTail; 1115 tail_list_phys = priv->txListDMA + sizeof(TLanList) * priv->txTail;
@@ -1125,16 +1127,16 @@ static int TLan_StartTx( struct sk_buff *skb, struct net_device *dev )
1125 1127
1126 if ( bbuf ) { 1128 if ( bbuf ) {
1127 tail_buffer = priv->txBuffer + ( priv->txTail * TLAN_MAX_FRAME_SIZE ); 1129 tail_buffer = priv->txBuffer + ( priv->txTail * TLAN_MAX_FRAME_SIZE );
1128 skb_copy_from_linear_data(skb, tail_buffer, skb->len); 1130 skb_copy_from_linear_data(skb, tail_buffer, txlen);
1129 } else { 1131 } else {
1130 tail_list->buffer[0].address = pci_map_single(priv->pciDev, 1132 tail_list->buffer[0].address = pci_map_single(priv->pciDev,
1131 skb->data, skb->len, 1133 skb->data, txlen,
1132 PCI_DMA_TODEVICE); 1134 PCI_DMA_TODEVICE);
1133 TLan_StoreSKB(tail_list, skb); 1135 TLan_StoreSKB(tail_list, skb);
1134 } 1136 }
1135 1137
1136 tail_list->frameSize = (u16) skb->len; 1138 tail_list->frameSize = (u16) txlen;
1137 tail_list->buffer[0].count = TLAN_LAST_BUFFER | (u32) skb->len; 1139 tail_list->buffer[0].count = TLAN_LAST_BUFFER | (u32) txlen;
1138 tail_list->buffer[1].count = 0; 1140 tail_list->buffer[1].count = 0;
1139 tail_list->buffer[1].address = 0; 1141 tail_list->buffer[1].address = 0;
1140 1142
@@ -1431,7 +1433,9 @@ static u32 TLan_HandleTxEOF( struct net_device *dev, u16 host_int )
1431 if ( ! bbuf ) { 1433 if ( ! bbuf ) {
1432 struct sk_buff *skb = TLan_GetSKB(head_list); 1434 struct sk_buff *skb = TLan_GetSKB(head_list);
1433 pci_unmap_single(priv->pciDev, head_list->buffer[0].address, 1435 pci_unmap_single(priv->pciDev, head_list->buffer[0].address,
1434 skb->len, PCI_DMA_TODEVICE); 1436 max(skb->len,
1437 (unsigned int)TLAN_MIN_FRAME_SIZE),
1438 PCI_DMA_TODEVICE);
1435 dev_kfree_skb_any(skb); 1439 dev_kfree_skb_any(skb);
1436 head_list->buffer[8].address = 0; 1440 head_list->buffer[8].address = 0;
1437 head_list->buffer[9].address = 0; 1441 head_list->buffer[9].address = 0;
@@ -2055,9 +2059,12 @@ static void TLan_FreeLists( struct net_device *dev )
2055 list = priv->txList + i; 2059 list = priv->txList + i;
2056 skb = TLan_GetSKB(list); 2060 skb = TLan_GetSKB(list);
2057 if ( skb ) { 2061 if ( skb ) {
2058 pci_unmap_single(priv->pciDev, 2062 pci_unmap_single(
2059 list->buffer[0].address, skb->len, 2063 priv->pciDev,
2060 PCI_DMA_TODEVICE); 2064 list->buffer[0].address,
2065 max(skb->len,
2066 (unsigned int)TLAN_MIN_FRAME_SIZE),
2067 PCI_DMA_TODEVICE);
2061 dev_kfree_skb_any( skb ); 2068 dev_kfree_skb_any( skb );
2062 list->buffer[8].address = 0; 2069 list->buffer[8].address = 0;
2063 list->buffer[9].address = 0; 2070 list->buffer[9].address = 0;
diff --git a/drivers/net/tulip/dmfe.c b/drivers/net/tulip/dmfe.c
index 8e46a513a252..c91852f49a48 100644
--- a/drivers/net/tulip/dmfe.c
+++ b/drivers/net/tulip/dmfe.c
@@ -420,9 +420,13 @@ static int __devinit dmfe_init_one (struct pci_dev *pdev,
420 /* Allocate Tx/Rx descriptor memory */ 420 /* Allocate Tx/Rx descriptor memory */
421 db->desc_pool_ptr = pci_alloc_consistent(pdev, sizeof(struct tx_desc) * 421 db->desc_pool_ptr = pci_alloc_consistent(pdev, sizeof(struct tx_desc) *
422 DESC_ALL_CNT + 0x20, &db->desc_pool_dma_ptr); 422 DESC_ALL_CNT + 0x20, &db->desc_pool_dma_ptr);
423 if (!db->desc_pool_ptr)
424 goto err_out_res;
423 425
424 db->buf_pool_ptr = pci_alloc_consistent(pdev, TX_BUF_ALLOC * 426 db->buf_pool_ptr = pci_alloc_consistent(pdev, TX_BUF_ALLOC *
425 TX_DESC_CNT + 4, &db->buf_pool_dma_ptr); 427 TX_DESC_CNT + 4, &db->buf_pool_dma_ptr);
428 if (!db->buf_pool_ptr)
429 goto err_out_free_desc;
426 430
427 db->first_tx_desc = (struct tx_desc *) db->desc_pool_ptr; 431 db->first_tx_desc = (struct tx_desc *) db->desc_pool_ptr;
428 db->first_tx_desc_dma = db->desc_pool_dma_ptr; 432 db->first_tx_desc_dma = db->desc_pool_dma_ptr;
@@ -469,7 +473,7 @@ static int __devinit dmfe_init_one (struct pci_dev *pdev,
469 473
470 err = register_netdev (dev); 474 err = register_netdev (dev);
471 if (err) 475 if (err)
472 goto err_out_res; 476 goto err_out_free_buf;
473 477
474 printk(KERN_INFO "%s: Davicom DM%04lx at pci%s, " 478 printk(KERN_INFO "%s: Davicom DM%04lx at pci%s, "
475 "%s, irq %d.\n", 479 "%s, irq %d.\n",
@@ -483,6 +487,12 @@ static int __devinit dmfe_init_one (struct pci_dev *pdev,
483 487
484 return 0; 488 return 0;
485 489
490err_out_free_buf:
491 pci_free_consistent(pdev, TX_BUF_ALLOC * TX_DESC_CNT + 4,
492 db->buf_pool_ptr, db->buf_pool_dma_ptr);
493err_out_free_desc:
494 pci_free_consistent(pdev, sizeof(struct tx_desc) * DESC_ALL_CNT + 0x20,
495 db->desc_pool_ptr, db->desc_pool_dma_ptr);
486err_out_res: 496err_out_res:
487 pci_release_regions(pdev); 497 pci_release_regions(pdev);
488err_out_disable: 498err_out_disable:
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 6daea0c91862..33b6d1b122fb 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -1070,8 +1070,6 @@ static int tun_chr_close(struct inode *inode, struct file *file)
1070 1070
1071 DBG(KERN_INFO "%s: tun_chr_close\n", tun->dev->name); 1071 DBG(KERN_INFO "%s: tun_chr_close\n", tun->dev->name);
1072 1072
1073 tun_chr_fasync(-1, file, 0);
1074
1075 rtnl_lock(); 1073 rtnl_lock();
1076 1074
1077 /* Detach from net device */ 1075 /* Detach from net device */
diff --git a/drivers/net/ucc_geth_ethtool.c b/drivers/net/ucc_geth_ethtool.c
index cfbbfee55836..68a7f5414133 100644
--- a/drivers/net/ucc_geth_ethtool.c
+++ b/drivers/net/ucc_geth_ethtool.c
@@ -37,7 +37,6 @@
37#include <asm/irq.h> 37#include <asm/irq.h>
38#include <asm/uaccess.h> 38#include <asm/uaccess.h>
39#include <asm/types.h> 39#include <asm/types.h>
40#include <asm/uaccess.h>
41 40
42#include "ucc_geth.h" 41#include "ucc_geth.h"
43#include "ucc_geth_mii.h" 42#include "ucc_geth_mii.h"
@@ -324,17 +323,17 @@ static void uec_get_ethtool_stats(struct net_device *netdev,
324 if (stats_mode & UCC_GETH_STATISTICS_GATHERING_MODE_HARDWARE) { 323 if (stats_mode & UCC_GETH_STATISTICS_GATHERING_MODE_HARDWARE) {
325 base = (u32 __iomem *)&ugeth->ug_regs->tx64; 324 base = (u32 __iomem *)&ugeth->ug_regs->tx64;
326 for (i = 0; i < UEC_HW_STATS_LEN; i++) 325 for (i = 0; i < UEC_HW_STATS_LEN; i++)
327 data[j++] = (u64)in_be32(&base[i]); 326 data[j++] = in_be32(&base[i]);
328 } 327 }
329 if (stats_mode & UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_TX) { 328 if (stats_mode & UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_TX) {
330 base = (u32 __iomem *)ugeth->p_tx_fw_statistics_pram; 329 base = (u32 __iomem *)ugeth->p_tx_fw_statistics_pram;
331 for (i = 0; i < UEC_TX_FW_STATS_LEN; i++) 330 for (i = 0; i < UEC_TX_FW_STATS_LEN; i++)
332 data[j++] = (u64)in_be32(&base[i]); 331 data[j++] = base ? in_be32(&base[i]) : 0;
333 } 332 }
334 if (stats_mode & UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_RX) { 333 if (stats_mode & UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_RX) {
335 base = (u32 __iomem *)ugeth->p_rx_fw_statistics_pram; 334 base = (u32 __iomem *)ugeth->p_rx_fw_statistics_pram;
336 for (i = 0; i < UEC_RX_FW_STATS_LEN; i++) 335 for (i = 0; i < UEC_RX_FW_STATS_LEN; i++)
337 data[j++] = (u64)in_be32(&base[i]); 336 data[j++] = base ? in_be32(&base[i]) : 0;
338 } 337 }
339} 338}
340 339
diff --git a/drivers/net/usb/asix.c b/drivers/net/usb/asix.c
index 37ecf845edfe..de57490103fc 100644
--- a/drivers/net/usb/asix.c
+++ b/drivers/net/usb/asix.c
@@ -1102,12 +1102,14 @@ static int ax88178_link_reset(struct usbnet *dev)
1102 mode = AX88178_MEDIUM_DEFAULT; 1102 mode = AX88178_MEDIUM_DEFAULT;
1103 1103
1104 if (ecmd.speed == SPEED_1000) 1104 if (ecmd.speed == SPEED_1000)
1105 mode |= AX_MEDIUM_GM | AX_MEDIUM_ENCK; 1105 mode |= AX_MEDIUM_GM;
1106 else if (ecmd.speed == SPEED_100) 1106 else if (ecmd.speed == SPEED_100)
1107 mode |= AX_MEDIUM_PS; 1107 mode |= AX_MEDIUM_PS;
1108 else 1108 else
1109 mode &= ~(AX_MEDIUM_PS | AX_MEDIUM_GM); 1109 mode &= ~(AX_MEDIUM_PS | AX_MEDIUM_GM);
1110 1110
1111 mode |= AX_MEDIUM_ENCK;
1112
1111 if (ecmd.duplex == DUPLEX_FULL) 1113 if (ecmd.duplex == DUPLEX_FULL)
1112 mode |= AX_MEDIUM_FD; 1114 mode |= AX_MEDIUM_FD;
1113 else 1115 else
@@ -1444,6 +1446,10 @@ static const struct usb_device_id products [] = {
1444 // Apple USB Ethernet Adapter 1446 // Apple USB Ethernet Adapter
1445 USB_DEVICE(0x05ac, 0x1402), 1447 USB_DEVICE(0x05ac, 0x1402),
1446 .driver_info = (unsigned long) &ax88772_info, 1448 .driver_info = (unsigned long) &ax88772_info,
1449}, {
1450 // Cables-to-Go USB Ethernet Adapter
1451 USB_DEVICE(0x0b95, 0x772a),
1452 .driver_info = (unsigned long) &ax88772_info,
1447}, 1453},
1448 { }, // END 1454 { }, // END
1449}; 1455};
diff --git a/drivers/net/usb/dm9601.c b/drivers/net/usb/dm9601.c
index 78df2be8a728..db3377dae9d5 100644
--- a/drivers/net/usb/dm9601.c
+++ b/drivers/net/usb/dm9601.c
@@ -396,6 +396,20 @@ static void dm9601_set_multicast(struct net_device *net)
396 dm_write_reg_async(dev, DM_RX_CTRL, rx_ctl); 396 dm_write_reg_async(dev, DM_RX_CTRL, rx_ctl);
397} 397}
398 398
399static int dm9601_set_mac_address(struct net_device *net, void *p)
400{
401 struct sockaddr *addr = p;
402 struct usbnet *dev = netdev_priv(net);
403
404 if (!is_valid_ether_addr(addr->sa_data))
405 return -EINVAL;
406
407 memcpy(net->dev_addr, addr->sa_data, net->addr_len);
408 dm_write_async(dev, DM_PHY_ADDR, net->addr_len, net->dev_addr);
409
410 return 0;
411}
412
399static int dm9601_bind(struct usbnet *dev, struct usb_interface *intf) 413static int dm9601_bind(struct usbnet *dev, struct usb_interface *intf)
400{ 414{
401 int ret; 415 int ret;
@@ -406,6 +420,7 @@ static int dm9601_bind(struct usbnet *dev, struct usb_interface *intf)
406 420
407 dev->net->do_ioctl = dm9601_ioctl; 421 dev->net->do_ioctl = dm9601_ioctl;
408 dev->net->set_multicast_list = dm9601_set_multicast; 422 dev->net->set_multicast_list = dm9601_set_multicast;
423 dev->net->set_mac_address = dm9601_set_mac_address;
409 dev->net->ethtool_ops = &dm9601_ethtool_ops; 424 dev->net->ethtool_ops = &dm9601_ethtool_ops;
410 dev->net->hard_header_len += DM_TX_OVERHEAD; 425 dev->net->hard_header_len += DM_TX_OVERHEAD;
411 dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len; 426 dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len;
diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
index 1164c52e2c0a..8e90891f0e42 100644
--- a/drivers/net/usb/hso.c
+++ b/drivers/net/usb/hso.c
@@ -2184,19 +2184,20 @@ static void hso_create_rfkill(struct hso_device *hso_dev,
2184 struct usb_interface *interface) 2184 struct usb_interface *interface)
2185{ 2185{
2186 struct hso_net *hso_net = dev2net(hso_dev); 2186 struct hso_net *hso_net = dev2net(hso_dev);
2187 struct device *dev = hso_dev->dev; 2187 struct device *dev = &hso_net->net->dev;
2188 char *rfkn; 2188 char *rfkn;
2189 2189
2190 hso_net->rfkill = rfkill_allocate(&interface_to_usbdev(interface)->dev, 2190 hso_net->rfkill = rfkill_allocate(&interface_to_usbdev(interface)->dev,
2191 RFKILL_TYPE_WLAN); 2191 RFKILL_TYPE_WWAN);
2192 if (!hso_net->rfkill) { 2192 if (!hso_net->rfkill) {
2193 dev_err(dev, "%s - Out of memory", __func__); 2193 dev_err(dev, "%s - Out of memory\n", __func__);
2194 return; 2194 return;
2195 } 2195 }
2196 rfkn = kzalloc(20, GFP_KERNEL); 2196 rfkn = kzalloc(20, GFP_KERNEL);
2197 if (!rfkn) { 2197 if (!rfkn) {
2198 rfkill_free(hso_net->rfkill); 2198 rfkill_free(hso_net->rfkill);
2199 dev_err(dev, "%s - Out of memory", __func__); 2199 hso_net->rfkill = NULL;
2200 dev_err(dev, "%s - Out of memory\n", __func__);
2200 return; 2201 return;
2201 } 2202 }
2202 snprintf(rfkn, 20, "hso-%d", 2203 snprintf(rfkn, 20, "hso-%d",
@@ -2209,7 +2210,8 @@ static void hso_create_rfkill(struct hso_device *hso_dev,
2209 kfree(rfkn); 2210 kfree(rfkn);
2210 hso_net->rfkill->name = NULL; 2211 hso_net->rfkill->name = NULL;
2211 rfkill_free(hso_net->rfkill); 2212 rfkill_free(hso_net->rfkill);
2212 dev_err(dev, "%s - Failed to register rfkill", __func__); 2213 hso_net->rfkill = NULL;
2214 dev_err(dev, "%s - Failed to register rfkill\n", __func__);
2213 return; 2215 return;
2214 } 2216 }
2215} 2217}
diff --git a/drivers/net/via-velocity.c b/drivers/net/via-velocity.c
index 2dced383bcfb..11cb3e504e1c 100644
--- a/drivers/net/via-velocity.c
+++ b/drivers/net/via-velocity.c
@@ -521,7 +521,7 @@ static void __devexit velocity_remove1(struct pci_dev *pdev)
521 * we don't duplicate code for each option. 521 * we don't duplicate code for each option.
522 */ 522 */
523 523
524static void __devinit velocity_set_int_opt(int *opt, int val, int min, int max, int def, char *name, char *devname) 524static void __devinit velocity_set_int_opt(int *opt, int val, int min, int max, int def, char *name, const char *devname)
525{ 525{
526 if (val == -1) 526 if (val == -1)
527 *opt = def; 527 *opt = def;
@@ -550,7 +550,7 @@ static void __devinit velocity_set_int_opt(int *opt, int val, int min, int max,
550 * we don't duplicate code for each option. 550 * we don't duplicate code for each option.
551 */ 551 */
552 552
553static void __devinit velocity_set_bool_opt(u32 * opt, int val, int def, u32 flag, char *name, char *devname) 553static void __devinit velocity_set_bool_opt(u32 * opt, int val, int def, u32 flag, char *name, const char *devname)
554{ 554{
555 (*opt) &= (~flag); 555 (*opt) &= (~flag);
556 if (val == -1) 556 if (val == -1)
@@ -576,7 +576,7 @@ static void __devinit velocity_set_bool_opt(u32 * opt, int val, int def, u32 fla
576 * for the current device 576 * for the current device
577 */ 577 */
578 578
579static void __devinit velocity_get_options(struct velocity_opt *opts, int index, char *devname) 579static void __devinit velocity_get_options(struct velocity_opt *opts, int index, const char *devname)
580{ 580{
581 581
582 velocity_set_int_opt(&opts->rx_thresh, rx_thresh[index], RX_THRESH_MIN, RX_THRESH_MAX, RX_THRESH_DEF, "rx_thresh", devname); 582 velocity_set_int_opt(&opts->rx_thresh, rx_thresh[index], RX_THRESH_MIN, RX_THRESH_MAX, RX_THRESH_DEF, "rx_thresh", devname);
@@ -863,6 +863,7 @@ static int __devinit velocity_found1(struct pci_dev *pdev, const struct pci_devi
863 static int first = 1; 863 static int first = 1;
864 struct net_device *dev; 864 struct net_device *dev;
865 int i; 865 int i;
866 const char *drv_string;
866 const struct velocity_info_tbl *info = &chip_info_table[ent->driver_data]; 867 const struct velocity_info_tbl *info = &chip_info_table[ent->driver_data];
867 struct velocity_info *vptr; 868 struct velocity_info *vptr;
868 struct mac_regs __iomem * regs; 869 struct mac_regs __iomem * regs;
@@ -935,7 +936,9 @@ static int __devinit velocity_found1(struct pci_dev *pdev, const struct pci_devi
935 dev->dev_addr[i] = readb(&regs->PAR[i]); 936 dev->dev_addr[i] = readb(&regs->PAR[i]);
936 937
937 938
938 velocity_get_options(&vptr->options, velocity_nics, dev->name); 939 drv_string = dev_driver_string(&pdev->dev);
940
941 velocity_get_options(&vptr->options, velocity_nics, drv_string);
939 942
940 /* 943 /*
941 * Mask out the options cannot be set to the chip 944 * Mask out the options cannot be set to the chip
@@ -2293,7 +2296,7 @@ static void velocity_set_multi(struct net_device *dev)
2293 } 2296 }
2294 2297
2295 mac_set_cam_mask(regs, vptr->mCAMmask); 2298 mac_set_cam_mask(regs, vptr->mCAMmask);
2296 rx_mode = (RCR_AM | RCR_AB); 2299 rx_mode = RCR_AM | RCR_AB | RCR_AP;
2297 } 2300 }
2298 if (dev->mtu > 1500) 2301 if (dev->mtu > 1500)
2299 rx_mode |= RCR_AL; 2302 rx_mode |= RCR_AL;
diff --git a/drivers/net/wan/syncppp.c b/drivers/net/wan/syncppp.c
index 327d58589e12..6e92f7b44b1a 100644
--- a/drivers/net/wan/syncppp.c
+++ b/drivers/net/wan/syncppp.c
@@ -756,10 +756,11 @@ static void sppp_cisco_input (struct sppp *sp, struct sk_buff *skb)
756 case CISCO_ADDR_REQ: 756 case CISCO_ADDR_REQ:
757 /* Stolen from net/ipv4/devinet.c -- SIOCGIFADDR ioctl */ 757 /* Stolen from net/ipv4/devinet.c -- SIOCGIFADDR ioctl */
758 { 758 {
759 struct in_device *in_dev;
760 struct in_ifaddr *ifa;
761 __be32 addr = 0, mask = htonl(~0U); /* FIXME: is the mask correct? */ 759 __be32 addr = 0, mask = htonl(~0U); /* FIXME: is the mask correct? */
762#ifdef CONFIG_INET 760#ifdef CONFIG_INET
761 struct in_device *in_dev;
762 struct in_ifaddr *ifa;
763
763 rcu_read_lock(); 764 rcu_read_lock();
764 if ((in_dev = __in_dev_get_rcu(dev)) != NULL) 765 if ((in_dev = __in_dev_get_rcu(dev)) != NULL)
765 { 766 {
diff --git a/drivers/net/wan/z85230.c b/drivers/net/wan/z85230.c
index ccd9cd35ecbe..5bf7e01ef0e9 100644
--- a/drivers/net/wan/z85230.c
+++ b/drivers/net/wan/z85230.c
@@ -695,7 +695,6 @@ EXPORT_SYMBOL(z8530_nop);
695 * z8530_interrupt - Handle an interrupt from a Z8530 695 * z8530_interrupt - Handle an interrupt from a Z8530
696 * @irq: Interrupt number 696 * @irq: Interrupt number
697 * @dev_id: The Z8530 device that is interrupting. 697 * @dev_id: The Z8530 device that is interrupting.
698 * @regs: unused
699 * 698 *
700 * A Z85[2]30 device has stuck its hand in the air for attention. 699 * A Z85[2]30 device has stuck its hand in the air for attention.
701 * We scan both the channels on the chip for events and then call 700 * We scan both the channels on the chip for events and then call
diff --git a/drivers/net/wireless/ath5k/base.c b/drivers/net/wireless/ath5k/base.c
index 9b95c4049b31..2d14255eb103 100644
--- a/drivers/net/wireless/ath5k/base.c
+++ b/drivers/net/wireless/ath5k/base.c
@@ -240,6 +240,10 @@ static u64 ath5k_get_tsf(struct ieee80211_hw *hw);
240static void ath5k_reset_tsf(struct ieee80211_hw *hw); 240static void ath5k_reset_tsf(struct ieee80211_hw *hw);
241static int ath5k_beacon_update(struct ieee80211_hw *hw, 241static int ath5k_beacon_update(struct ieee80211_hw *hw,
242 struct sk_buff *skb); 242 struct sk_buff *skb);
243static void ath5k_bss_info_changed(struct ieee80211_hw *hw,
244 struct ieee80211_vif *vif,
245 struct ieee80211_bss_conf *bss_conf,
246 u32 changes);
243 247
244static struct ieee80211_ops ath5k_hw_ops = { 248static struct ieee80211_ops ath5k_hw_ops = {
245 .tx = ath5k_tx, 249 .tx = ath5k_tx,
@@ -256,6 +260,7 @@ static struct ieee80211_ops ath5k_hw_ops = {
256 .get_tx_stats = ath5k_get_tx_stats, 260 .get_tx_stats = ath5k_get_tx_stats,
257 .get_tsf = ath5k_get_tsf, 261 .get_tsf = ath5k_get_tsf,
258 .reset_tsf = ath5k_reset_tsf, 262 .reset_tsf = ath5k_reset_tsf,
263 .bss_info_changed = ath5k_bss_info_changed,
259}; 264};
260 265
261/* 266/*
@@ -340,9 +345,9 @@ static inline u64 ath5k_extend_tsf(struct ath5k_hw *ah, u32 rstamp)
340} 345}
341 346
342/* Interrupt handling */ 347/* Interrupt handling */
343static int ath5k_init(struct ath5k_softc *sc); 348static int ath5k_init(struct ath5k_softc *sc, bool is_resume);
344static int ath5k_stop_locked(struct ath5k_softc *sc); 349static int ath5k_stop_locked(struct ath5k_softc *sc);
345static int ath5k_stop_hw(struct ath5k_softc *sc); 350static int ath5k_stop_hw(struct ath5k_softc *sc, bool is_suspend);
346static irqreturn_t ath5k_intr(int irq, void *dev_id); 351static irqreturn_t ath5k_intr(int irq, void *dev_id);
347static void ath5k_tasklet_reset(unsigned long data); 352static void ath5k_tasklet_reset(unsigned long data);
348 353
@@ -646,7 +651,7 @@ ath5k_pci_suspend(struct pci_dev *pdev, pm_message_t state)
646 651
647 ath5k_led_off(sc); 652 ath5k_led_off(sc);
648 653
649 ath5k_stop_hw(sc); 654 ath5k_stop_hw(sc, true);
650 655
651 free_irq(pdev->irq, sc); 656 free_irq(pdev->irq, sc);
652 pci_save_state(pdev); 657 pci_save_state(pdev);
@@ -661,8 +666,7 @@ ath5k_pci_resume(struct pci_dev *pdev)
661{ 666{
662 struct ieee80211_hw *hw = pci_get_drvdata(pdev); 667 struct ieee80211_hw *hw = pci_get_drvdata(pdev);
663 struct ath5k_softc *sc = hw->priv; 668 struct ath5k_softc *sc = hw->priv;
664 struct ath5k_hw *ah = sc->ah; 669 int err;
665 int i, err;
666 670
667 pci_restore_state(pdev); 671 pci_restore_state(pdev);
668 672
@@ -683,21 +687,11 @@ ath5k_pci_resume(struct pci_dev *pdev)
683 goto err_no_irq; 687 goto err_no_irq;
684 } 688 }
685 689
686 err = ath5k_init(sc); 690 err = ath5k_init(sc, true);
687 if (err) 691 if (err)
688 goto err_irq; 692 goto err_irq;
689 ath5k_led_enable(sc); 693 ath5k_led_enable(sc);
690 694
691 /*
692 * Reset the key cache since some parts do not
693 * reset the contents on initial power up or resume.
694 *
695 * FIXME: This may need to be revisited when mac80211 becomes
696 * aware of suspend/resume.
697 */
698 for (i = 0; i < AR5K_KEYTABLE_SIZE; i++)
699 ath5k_hw_reset_key(ah, i);
700
701 return 0; 695 return 0;
702err_irq: 696err_irq:
703 free_irq(pdev->irq, sc); 697 free_irq(pdev->irq, sc);
@@ -718,7 +712,6 @@ ath5k_attach(struct pci_dev *pdev, struct ieee80211_hw *hw)
718 struct ath5k_softc *sc = hw->priv; 712 struct ath5k_softc *sc = hw->priv;
719 struct ath5k_hw *ah = sc->ah; 713 struct ath5k_hw *ah = sc->ah;
720 u8 mac[ETH_ALEN]; 714 u8 mac[ETH_ALEN];
721 unsigned int i;
722 int ret; 715 int ret;
723 716
724 ATH5K_DBG(sc, ATH5K_DEBUG_ANY, "devid 0x%x\n", pdev->device); 717 ATH5K_DBG(sc, ATH5K_DEBUG_ANY, "devid 0x%x\n", pdev->device);
@@ -737,13 +730,6 @@ ath5k_attach(struct pci_dev *pdev, struct ieee80211_hw *hw)
737 __set_bit(ATH_STAT_MRRETRY, sc->status); 730 __set_bit(ATH_STAT_MRRETRY, sc->status);
738 731
739 /* 732 /*
740 * Reset the key cache since some parts do not
741 * reset the contents on initial power up.
742 */
743 for (i = 0; i < AR5K_KEYTABLE_SIZE; i++)
744 ath5k_hw_reset_key(ah, i);
745
746 /*
747 * Collect the channel list. The 802.11 layer 733 * Collect the channel list. The 802.11 layer
 748 * is responsible for filtering this list based 734 * is responsible for filtering this list based
749 * on settings like the phy mode and regulatory 735 * on settings like the phy mode and regulatory
@@ -2200,12 +2186,18 @@ ath5k_beacon_config(struct ath5k_softc *sc)
2200\********************/ 2186\********************/
2201 2187
2202static int 2188static int
2203ath5k_init(struct ath5k_softc *sc) 2189ath5k_init(struct ath5k_softc *sc, bool is_resume)
2204{ 2190{
2205 int ret; 2191 struct ath5k_hw *ah = sc->ah;
2192 int ret, i;
2206 2193
2207 mutex_lock(&sc->lock); 2194 mutex_lock(&sc->lock);
2208 2195
2196 if (is_resume && !test_bit(ATH_STAT_STARTED, sc->status))
2197 goto out_ok;
2198
2199 __clear_bit(ATH_STAT_STARTED, sc->status);
2200
2209 ATH5K_DBG(sc, ATH5K_DEBUG_RESET, "mode %d\n", sc->opmode); 2201 ATH5K_DBG(sc, ATH5K_DEBUG_RESET, "mode %d\n", sc->opmode);
2210 2202
2211 /* 2203 /*
@@ -2230,12 +2222,22 @@ ath5k_init(struct ath5k_softc *sc)
2230 if (ret) 2222 if (ret)
2231 goto done; 2223 goto done;
2232 2224
2225 /*
2226 * Reset the key cache since some parts do not reset the
2227 * contents on initial power up or resume from suspend.
2228 */
2229 for (i = 0; i < AR5K_KEYTABLE_SIZE; i++)
2230 ath5k_hw_reset_key(ah, i);
2231
2232 __set_bit(ATH_STAT_STARTED, sc->status);
2233
2233 /* Set ack to be sent at low bit-rates */ 2234 /* Set ack to be sent at low bit-rates */
2234 ath5k_hw_set_ack_bitrate_high(sc->ah, false); 2235 ath5k_hw_set_ack_bitrate_high(ah, false);
2235 2236
2236 mod_timer(&sc->calib_tim, round_jiffies(jiffies + 2237 mod_timer(&sc->calib_tim, round_jiffies(jiffies +
2237 msecs_to_jiffies(ath5k_calinterval * 1000))); 2238 msecs_to_jiffies(ath5k_calinterval * 1000)));
2238 2239
2240out_ok:
2239 ret = 0; 2241 ret = 0;
2240done: 2242done:
2241 mmiowb(); 2243 mmiowb();
@@ -2290,7 +2292,7 @@ ath5k_stop_locked(struct ath5k_softc *sc)
2290 * stop is preempted). 2292 * stop is preempted).
2291 */ 2293 */
2292static int 2294static int
2293ath5k_stop_hw(struct ath5k_softc *sc) 2295ath5k_stop_hw(struct ath5k_softc *sc, bool is_suspend)
2294{ 2296{
2295 int ret; 2297 int ret;
2296 2298
@@ -2321,6 +2323,9 @@ ath5k_stop_hw(struct ath5k_softc *sc)
2321 } 2323 }
2322 } 2324 }
2323 ath5k_txbuf_free(sc, sc->bbuf); 2325 ath5k_txbuf_free(sc, sc->bbuf);
2326 if (!is_suspend)
2327 __clear_bit(ATH_STAT_STARTED, sc->status);
2328
2324 mmiowb(); 2329 mmiowb();
2325 mutex_unlock(&sc->lock); 2330 mutex_unlock(&sc->lock);
2326 2331
@@ -2718,12 +2723,12 @@ ath5k_reset_wake(struct ath5k_softc *sc)
2718 2723
2719static int ath5k_start(struct ieee80211_hw *hw) 2724static int ath5k_start(struct ieee80211_hw *hw)
2720{ 2725{
2721 return ath5k_init(hw->priv); 2726 return ath5k_init(hw->priv, false);
2722} 2727}
2723 2728
2724static void ath5k_stop(struct ieee80211_hw *hw) 2729static void ath5k_stop(struct ieee80211_hw *hw)
2725{ 2730{
2726 ath5k_stop_hw(hw->priv); 2731 ath5k_stop_hw(hw->priv, false);
2727} 2732}
2728 2733
2729static int ath5k_add_interface(struct ieee80211_hw *hw, 2734static int ath5k_add_interface(struct ieee80211_hw *hw,
@@ -2942,7 +2947,7 @@ static void ath5k_configure_filter(struct ieee80211_hw *hw,
2942 sc->opmode != NL80211_IFTYPE_MESH_POINT && 2947 sc->opmode != NL80211_IFTYPE_MESH_POINT &&
2943 test_bit(ATH_STAT_PROMISC, sc->status)) 2948 test_bit(ATH_STAT_PROMISC, sc->status))
2944 rfilt |= AR5K_RX_FILTER_PROM; 2949 rfilt |= AR5K_RX_FILTER_PROM;
2945 if (sc->opmode == NL80211_IFTYPE_STATION || 2950 if ((sc->opmode == NL80211_IFTYPE_STATION && sc->assoc) ||
2946 sc->opmode == NL80211_IFTYPE_ADHOC) { 2951 sc->opmode == NL80211_IFTYPE_ADHOC) {
2947 rfilt |= AR5K_RX_FILTER_BEACON; 2952 rfilt |= AR5K_RX_FILTER_BEACON;
2948 } 2953 }
@@ -3083,4 +3088,32 @@ ath5k_beacon_update(struct ieee80211_hw *hw, struct sk_buff *skb)
3083end: 3088end:
3084 return ret; 3089 return ret;
3085} 3090}
3091static void
3092set_beacon_filter(struct ieee80211_hw *hw, bool enable)
3093{
3094 struct ath5k_softc *sc = hw->priv;
3095 struct ath5k_hw *ah = sc->ah;
3096 u32 rfilt;
3097 rfilt = ath5k_hw_get_rx_filter(ah);
3098 if (enable)
3099 rfilt |= AR5K_RX_FILTER_BEACON;
3100 else
3101 rfilt &= ~AR5K_RX_FILTER_BEACON;
3102 ath5k_hw_set_rx_filter(ah, rfilt);
3103 sc->filter_flags = rfilt;
3104}
3086 3105
3106static void ath5k_bss_info_changed(struct ieee80211_hw *hw,
3107 struct ieee80211_vif *vif,
3108 struct ieee80211_bss_conf *bss_conf,
3109 u32 changes)
3110{
3111 struct ath5k_softc *sc = hw->priv;
3112 if (changes & BSS_CHANGED_ASSOC) {
3113 mutex_lock(&sc->lock);
3114 sc->assoc = bss_conf->assoc;
3115 if (sc->opmode == NL80211_IFTYPE_STATION)
3116 set_beacon_filter(hw, sc->assoc);
3117 mutex_unlock(&sc->lock);
3118 }
3119}
diff --git a/drivers/net/wireless/ath5k/base.h b/drivers/net/wireless/ath5k/base.h
index 9d0b728928e3..facc60ddada2 100644
--- a/drivers/net/wireless/ath5k/base.h
+++ b/drivers/net/wireless/ath5k/base.h
@@ -128,11 +128,12 @@ struct ath5k_softc {
128 size_t desc_len; /* size of TX/RX descriptors */ 128 size_t desc_len; /* size of TX/RX descriptors */
129 u16 cachelsz; /* cache line size */ 129 u16 cachelsz; /* cache line size */
130 130
131 DECLARE_BITMAP(status, 4); 131 DECLARE_BITMAP(status, 5);
132#define ATH_STAT_INVALID 0 /* disable hardware accesses */ 132#define ATH_STAT_INVALID 0 /* disable hardware accesses */
133#define ATH_STAT_MRRETRY 1 /* multi-rate retry support */ 133#define ATH_STAT_MRRETRY 1 /* multi-rate retry support */
134#define ATH_STAT_PROMISC 2 134#define ATH_STAT_PROMISC 2
135#define ATH_STAT_LEDSOFT 3 /* enable LED gpio status */ 135#define ATH_STAT_LEDSOFT 3 /* enable LED gpio status */
136#define ATH_STAT_STARTED 4 /* opened & irqs enabled */
136 137
137 unsigned int filter_flags; /* HW flags, AR5K_RX_FILTER_* */ 138 unsigned int filter_flags; /* HW flags, AR5K_RX_FILTER_* */
138 unsigned int curmode; /* current phy mode */ 139 unsigned int curmode; /* current phy mode */
@@ -178,6 +179,7 @@ struct ath5k_softc {
178 179
179 struct timer_list calib_tim; /* calibration timer */ 180 struct timer_list calib_tim; /* calibration timer */
180 int power_level; /* Requested tx power in dbm */ 181 int power_level; /* Requested tx power in dbm */
 182 bool assoc; /* associate state */
181}; 183};
182 184
183#define ath5k_hw_hasbssidmask(_ah) \ 185#define ath5k_hw_hasbssidmask(_ah) \
diff --git a/drivers/net/wireless/ath5k/debug.c b/drivers/net/wireless/ath5k/debug.c
index 8f92d670f614..ccaeb5c219d2 100644
--- a/drivers/net/wireless/ath5k/debug.c
+++ b/drivers/net/wireless/ath5k/debug.c
@@ -339,7 +339,7 @@ static struct {
339 { ATH5K_DEBUG_BEACON, "beacon", "beacon handling" }, 339 { ATH5K_DEBUG_BEACON, "beacon", "beacon handling" },
340 { ATH5K_DEBUG_CALIBRATE, "calib", "periodic calibration" }, 340 { ATH5K_DEBUG_CALIBRATE, "calib", "periodic calibration" },
341 { ATH5K_DEBUG_TXPOWER, "txpower", "transmit power setting" }, 341 { ATH5K_DEBUG_TXPOWER, "txpower", "transmit power setting" },
342 { ATH5K_DEBUG_LED, "led", "LED mamagement" }, 342 { ATH5K_DEBUG_LED, "led", "LED management" },
343 { ATH5K_DEBUG_DUMP_RX, "dumprx", "print received skb content" }, 343 { ATH5K_DEBUG_DUMP_RX, "dumprx", "print received skb content" },
344 { ATH5K_DEBUG_DUMP_TX, "dumptx", "print transmit skb content" }, 344 { ATH5K_DEBUG_DUMP_TX, "dumptx", "print transmit skb content" },
345 { ATH5K_DEBUG_DUMPBANDS, "dumpbands", "dump bands" }, 345 { ATH5K_DEBUG_DUMPBANDS, "dumpbands", "dump bands" },
@@ -417,19 +417,19 @@ ath5k_debug_init_device(struct ath5k_softc *sc)
417 sc->debug.debugfs_phydir = debugfs_create_dir(wiphy_name(sc->hw->wiphy), 417 sc->debug.debugfs_phydir = debugfs_create_dir(wiphy_name(sc->hw->wiphy),
418 ath5k_global_debugfs); 418 ath5k_global_debugfs);
419 419
420 sc->debug.debugfs_debug = debugfs_create_file("debug", 0666, 420 sc->debug.debugfs_debug = debugfs_create_file("debug", S_IWUSR | S_IRUGO,
421 sc->debug.debugfs_phydir, sc, &fops_debug); 421 sc->debug.debugfs_phydir, sc, &fops_debug);
422 422
423 sc->debug.debugfs_registers = debugfs_create_file("registers", 0444, 423 sc->debug.debugfs_registers = debugfs_create_file("registers", S_IRUGO,
424 sc->debug.debugfs_phydir, sc, &fops_registers); 424 sc->debug.debugfs_phydir, sc, &fops_registers);
425 425
426 sc->debug.debugfs_tsf = debugfs_create_file("tsf", 0666, 426 sc->debug.debugfs_tsf = debugfs_create_file("tsf", S_IWUSR | S_IRUGO,
427 sc->debug.debugfs_phydir, sc, &fops_tsf); 427 sc->debug.debugfs_phydir, sc, &fops_tsf);
428 428
429 sc->debug.debugfs_beacon = debugfs_create_file("beacon", 0666, 429 sc->debug.debugfs_beacon = debugfs_create_file("beacon", S_IWUSR | S_IRUGO,
430 sc->debug.debugfs_phydir, sc, &fops_beacon); 430 sc->debug.debugfs_phydir, sc, &fops_beacon);
431 431
432 sc->debug.debugfs_reset = debugfs_create_file("reset", 0222, 432 sc->debug.debugfs_reset = debugfs_create_file("reset", S_IWUSR,
433 sc->debug.debugfs_phydir, sc, &fops_reset); 433 sc->debug.debugfs_phydir, sc, &fops_reset);
434} 434}
435 435
diff --git a/drivers/net/wireless/ath5k/desc.c b/drivers/net/wireless/ath5k/desc.c
index dd1374052ba9..5e362a7a3620 100644
--- a/drivers/net/wireless/ath5k/desc.c
+++ b/drivers/net/wireless/ath5k/desc.c
@@ -531,10 +531,10 @@ static int ath5k_hw_proc_5210_rx_status(struct ath5k_hw *ah,
531 AR5K_5210_RX_DESC_STATUS0_RECEIVE_SIGNAL); 531 AR5K_5210_RX_DESC_STATUS0_RECEIVE_SIGNAL);
532 rs->rs_rate = AR5K_REG_MS(rx_status->rx_status_0, 532 rs->rs_rate = AR5K_REG_MS(rx_status->rx_status_0,
533 AR5K_5210_RX_DESC_STATUS0_RECEIVE_RATE); 533 AR5K_5210_RX_DESC_STATUS0_RECEIVE_RATE);
534 rs->rs_antenna = rx_status->rx_status_0 & 534 rs->rs_antenna = AR5K_REG_MS(rx_status->rx_status_0,
535 AR5K_5210_RX_DESC_STATUS0_RECEIVE_ANTENNA; 535 AR5K_5210_RX_DESC_STATUS0_RECEIVE_ANTENNA);
536 rs->rs_more = rx_status->rx_status_0 & 536 rs->rs_more = !!(rx_status->rx_status_0 &
537 AR5K_5210_RX_DESC_STATUS0_MORE; 537 AR5K_5210_RX_DESC_STATUS0_MORE);
538 /* TODO: this timestamp is 13 bit, later on we assume 15 bit */ 538 /* TODO: this timestamp is 13 bit, later on we assume 15 bit */
539 rs->rs_tstamp = AR5K_REG_MS(rx_status->rx_status_1, 539 rs->rs_tstamp = AR5K_REG_MS(rx_status->rx_status_1,
540 AR5K_5210_RX_DESC_STATUS1_RECEIVE_TIMESTAMP); 540 AR5K_5210_RX_DESC_STATUS1_RECEIVE_TIMESTAMP);
@@ -607,10 +607,10 @@ static int ath5k_hw_proc_5212_rx_status(struct ath5k_hw *ah,
607 AR5K_5212_RX_DESC_STATUS0_RECEIVE_SIGNAL); 607 AR5K_5212_RX_DESC_STATUS0_RECEIVE_SIGNAL);
608 rs->rs_rate = AR5K_REG_MS(rx_status->rx_status_0, 608 rs->rs_rate = AR5K_REG_MS(rx_status->rx_status_0,
609 AR5K_5212_RX_DESC_STATUS0_RECEIVE_RATE); 609 AR5K_5212_RX_DESC_STATUS0_RECEIVE_RATE);
610 rs->rs_antenna = rx_status->rx_status_0 & 610 rs->rs_antenna = AR5K_REG_MS(rx_status->rx_status_0,
611 AR5K_5212_RX_DESC_STATUS0_RECEIVE_ANTENNA; 611 AR5K_5212_RX_DESC_STATUS0_RECEIVE_ANTENNA);
612 rs->rs_more = rx_status->rx_status_0 & 612 rs->rs_more = !!(rx_status->rx_status_0 &
613 AR5K_5212_RX_DESC_STATUS0_MORE; 613 AR5K_5212_RX_DESC_STATUS0_MORE);
614 rs->rs_tstamp = AR5K_REG_MS(rx_status->rx_status_1, 614 rs->rs_tstamp = AR5K_REG_MS(rx_status->rx_status_1,
615 AR5K_5212_RX_DESC_STATUS1_RECEIVE_TIMESTAMP); 615 AR5K_5212_RX_DESC_STATUS1_RECEIVE_TIMESTAMP);
616 rs->rs_status = 0; 616 rs->rs_status = 0;
diff --git a/drivers/net/wireless/ath5k/initvals.c b/drivers/net/wireless/ath5k/initvals.c
index ea2e1a20b499..ceaa6c475c06 100644
--- a/drivers/net/wireless/ath5k/initvals.c
+++ b/drivers/net/wireless/ath5k/initvals.c
@@ -806,6 +806,8 @@ static const struct ath5k_ini_mode ar5212_rf5111_ini_mode_end[] = {
806 { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } }, 806 { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } },
807 { AR5K_PHY(642), 807 { AR5K_PHY(642),
808 { 0xd03e6788, 0xd03e6788, 0xd03e6788, 0xd03e6788, 0xd03e6788 } }, 808 { 0xd03e6788, 0xd03e6788, 0xd03e6788, 0xd03e6788, 0xd03e6788 } },
809 { 0xa228,
810 { 0x000001b5, 0x000001b5, 0x000001b5, 0x000001b5, 0x000001b5 } },
809 { 0xa23c, 811 { 0xa23c,
810 { 0x13c889af, 0x13c889af, 0x13c889af, 0x13c889af, 0x13c889af } }, 812 { 0x13c889af, 0x13c889af, 0x13c889af, 0x13c889af, 0x13c889af } },
811}; 813};
diff --git a/drivers/net/wireless/ath5k/reset.c b/drivers/net/wireless/ath5k/reset.c
index 8f1886834e61..1b6d45b6772d 100644
--- a/drivers/net/wireless/ath5k/reset.c
+++ b/drivers/net/wireless/ath5k/reset.c
@@ -537,9 +537,10 @@ int ath5k_hw_reset(struct ath5k_hw *ah, enum nl80211_iftype op_mode,
537 mdelay(1); 537 mdelay(1);
538 538
539 /* 539 /*
540 * Write some more initial register settings 540 * Write some more initial register settings for revised chips
541 */ 541 */
542 if (ah->ah_version == AR5K_AR5212) { 542 if (ah->ah_version == AR5K_AR5212 &&
543 ah->ah_phy_revision > 0x41) {
543 ath5k_hw_reg_write(ah, 0x0002a002, 0x982c); 544 ath5k_hw_reg_write(ah, 0x0002a002, 0x982c);
544 545
545 if (channel->hw_value == CHANNEL_G) 546 if (channel->hw_value == CHANNEL_G)
@@ -558,19 +559,10 @@ int ath5k_hw_reset(struct ath5k_hw *ah, enum nl80211_iftype op_mode,
558 else 559 else
559 ath5k_hw_reg_write(ah, 0x00000000, 0x994c); 560 ath5k_hw_reg_write(ah, 0x00000000, 0x994c);
560 561
561 /* Some bits are disabled here, we know nothing about 562 /* Got this from legacy-hal */
562 * register 0xa228 yet, most of the times this ends up 563 AR5K_REG_DISABLE_BITS(ah, 0xa228, 0x200);
563 * with a value 0x9b5 -haven't seen any dump with 564
564 * a different value- */ 565 AR5K_REG_MASKED_BITS(ah, 0xa228, 0x800, 0xfffe03ff);
565 /* Got this from decompiling binary HAL */
566 data = ath5k_hw_reg_read(ah, 0xa228);
567 data &= 0xfffffdff;
568 ath5k_hw_reg_write(ah, data, 0xa228);
569
570 data = ath5k_hw_reg_read(ah, 0xa228);
571 data &= 0xfffe03ff;
572 ath5k_hw_reg_write(ah, data, 0xa228);
573 data = 0;
574 566
575 /* Just write 0x9b5 ? */ 567 /* Just write 0x9b5 ? */
576 /* ath5k_hw_reg_write(ah, 0x000009b5, 0xa228); */ 568 /* ath5k_hw_reg_write(ah, 0x000009b5, 0xa228); */
diff --git a/drivers/net/wireless/ath9k/beacon.c b/drivers/net/wireless/ath9k/beacon.c
index 9e15c30bbc06..4dd1c1bda0fb 100644
--- a/drivers/net/wireless/ath9k/beacon.c
+++ b/drivers/net/wireless/ath9k/beacon.c
@@ -170,7 +170,7 @@ static struct ath_buf *ath_beacon_generate(struct ath_softc *sc, int if_id)
170 skb = (struct sk_buff *)bf->bf_mpdu; 170 skb = (struct sk_buff *)bf->bf_mpdu;
171 if (skb) { 171 if (skb) {
172 pci_unmap_single(sc->pdev, bf->bf_dmacontext, 172 pci_unmap_single(sc->pdev, bf->bf_dmacontext,
173 skb_end_pointer(skb) - skb->head, 173 skb->len,
174 PCI_DMA_TODEVICE); 174 PCI_DMA_TODEVICE);
175 } 175 }
176 176
@@ -193,7 +193,7 @@ static struct ath_buf *ath_beacon_generate(struct ath_softc *sc, int if_id)
193 193
194 bf->bf_buf_addr = bf->bf_dmacontext = 194 bf->bf_buf_addr = bf->bf_dmacontext =
195 pci_map_single(sc->pdev, skb->data, 195 pci_map_single(sc->pdev, skb->data,
196 skb_end_pointer(skb) - skb->head, 196 skb->len,
197 PCI_DMA_TODEVICE); 197 PCI_DMA_TODEVICE);
198 198
199 skb = ieee80211_get_buffered_bc(sc->hw, avp->av_if_data); 199 skb = ieee80211_get_buffered_bc(sc->hw, avp->av_if_data);
@@ -352,7 +352,7 @@ int ath_beacon_alloc(struct ath_softc *sc, int if_id)
352 if (bf->bf_mpdu != NULL) { 352 if (bf->bf_mpdu != NULL) {
353 skb = (struct sk_buff *)bf->bf_mpdu; 353 skb = (struct sk_buff *)bf->bf_mpdu;
354 pci_unmap_single(sc->pdev, bf->bf_dmacontext, 354 pci_unmap_single(sc->pdev, bf->bf_dmacontext,
355 skb_end_pointer(skb) - skb->head, 355 skb->len,
356 PCI_DMA_TODEVICE); 356 PCI_DMA_TODEVICE);
357 dev_kfree_skb_any(skb); 357 dev_kfree_skb_any(skb);
358 bf->bf_mpdu = NULL; 358 bf->bf_mpdu = NULL;
@@ -412,7 +412,7 @@ int ath_beacon_alloc(struct ath_softc *sc, int if_id)
412 412
413 bf->bf_buf_addr = bf->bf_dmacontext = 413 bf->bf_buf_addr = bf->bf_dmacontext =
414 pci_map_single(sc->pdev, skb->data, 414 pci_map_single(sc->pdev, skb->data,
415 skb_end_pointer(skb) - skb->head, 415 skb->len,
416 PCI_DMA_TODEVICE); 416 PCI_DMA_TODEVICE);
417 bf->bf_mpdu = skb; 417 bf->bf_mpdu = skb;
418 418
@@ -439,7 +439,7 @@ void ath_beacon_return(struct ath_softc *sc, struct ath_vap *avp)
439 if (bf->bf_mpdu != NULL) { 439 if (bf->bf_mpdu != NULL) {
440 struct sk_buff *skb = (struct sk_buff *)bf->bf_mpdu; 440 struct sk_buff *skb = (struct sk_buff *)bf->bf_mpdu;
441 pci_unmap_single(sc->pdev, bf->bf_dmacontext, 441 pci_unmap_single(sc->pdev, bf->bf_dmacontext,
442 skb_end_pointer(skb) - skb->head, 442 skb->len,
443 PCI_DMA_TODEVICE); 443 PCI_DMA_TODEVICE);
444 dev_kfree_skb_any(skb); 444 dev_kfree_skb_any(skb);
445 bf->bf_mpdu = NULL; 445 bf->bf_mpdu = NULL;
diff --git a/drivers/net/wireless/ath9k/recv.c b/drivers/net/wireless/ath9k/recv.c
index 4983402af559..504a0444d89f 100644
--- a/drivers/net/wireless/ath9k/recv.c
+++ b/drivers/net/wireless/ath9k/recv.c
@@ -49,10 +49,12 @@ static void ath_rx_buf_link(struct ath_softc *sc, struct ath_buf *bf)
49 ASSERT(skb != NULL); 49 ASSERT(skb != NULL);
50 ds->ds_vdata = skb->data; 50 ds->ds_vdata = skb->data;
51 51
 52 /* setup rx descriptors */ 52 /* setup rx descriptors. The sc_rxbufsize here tells the hardware
53 * how much data it can DMA to us and that we are prepared
54 * to process */
53 ath9k_hw_setuprxdesc(ah, 55 ath9k_hw_setuprxdesc(ah,
54 ds, 56 ds,
55 skb_tailroom(skb), /* buffer size */ 57 sc->sc_rxbufsize,
56 0); 58 0);
57 59
58 if (sc->sc_rxlink == NULL) 60 if (sc->sc_rxlink == NULL)
@@ -398,6 +400,13 @@ static struct sk_buff *ath_rxbuf_alloc(struct ath_softc *sc,
398 * in rx'd frames. 400 * in rx'd frames.
399 */ 401 */
400 402
403 /* Note: the kernel can allocate a value greater than
404 * what we ask it to give us. We really only need 4 KB as that
 405 * is all this hardware supports and in fact we need at least 3849
406 * as that is the MAX AMSDU size this hardware supports.
407 * Unfortunately this means we may get 8 KB here from the
408 * kernel... and that is actually what is observed on some
409 * systems :( */
401 skb = dev_alloc_skb(len + sc->sc_cachelsz - 1); 410 skb = dev_alloc_skb(len + sc->sc_cachelsz - 1);
402 if (skb != NULL) { 411 if (skb != NULL) {
403 off = ((unsigned long) skb->data) % sc->sc_cachelsz; 412 off = ((unsigned long) skb->data) % sc->sc_cachelsz;
@@ -456,7 +465,7 @@ static int ath_rx_indicate(struct ath_softc *sc,
456 if (nskb != NULL) { 465 if (nskb != NULL) {
457 bf->bf_mpdu = nskb; 466 bf->bf_mpdu = nskb;
458 bf->bf_buf_addr = pci_map_single(sc->pdev, nskb->data, 467 bf->bf_buf_addr = pci_map_single(sc->pdev, nskb->data,
459 skb_end_pointer(nskb) - nskb->head, 468 sc->sc_rxbufsize,
460 PCI_DMA_FROMDEVICE); 469 PCI_DMA_FROMDEVICE);
461 bf->bf_dmacontext = bf->bf_buf_addr; 470 bf->bf_dmacontext = bf->bf_buf_addr;
462 ATH_RX_CONTEXT(nskb)->ctx_rxbuf = bf; 471 ATH_RX_CONTEXT(nskb)->ctx_rxbuf = bf;
@@ -542,7 +551,7 @@ int ath_rx_init(struct ath_softc *sc, int nbufs)
542 551
543 bf->bf_mpdu = skb; 552 bf->bf_mpdu = skb;
544 bf->bf_buf_addr = pci_map_single(sc->pdev, skb->data, 553 bf->bf_buf_addr = pci_map_single(sc->pdev, skb->data,
545 skb_end_pointer(skb) - skb->head, 554 sc->sc_rxbufsize,
546 PCI_DMA_FROMDEVICE); 555 PCI_DMA_FROMDEVICE);
547 bf->bf_dmacontext = bf->bf_buf_addr; 556 bf->bf_dmacontext = bf->bf_buf_addr;
548 ATH_RX_CONTEXT(skb)->ctx_rxbuf = bf; 557 ATH_RX_CONTEXT(skb)->ctx_rxbuf = bf;
@@ -1007,7 +1016,7 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush)
1007 1016
1008 pci_dma_sync_single_for_cpu(sc->pdev, 1017 pci_dma_sync_single_for_cpu(sc->pdev,
1009 bf->bf_buf_addr, 1018 bf->bf_buf_addr,
1010 skb_tailroom(skb), 1019 sc->sc_rxbufsize,
1011 PCI_DMA_FROMDEVICE); 1020 PCI_DMA_FROMDEVICE);
1012 pci_unmap_single(sc->pdev, 1021 pci_unmap_single(sc->pdev,
1013 bf->bf_buf_addr, 1022 bf->bf_buf_addr,
diff --git a/drivers/net/wireless/hostap/hostap_wlan.h b/drivers/net/wireless/hostap/hostap_wlan.h
index ffdf4876121b..a68f97c39359 100644
--- a/drivers/net/wireless/hostap/hostap_wlan.h
+++ b/drivers/net/wireless/hostap/hostap_wlan.h
@@ -918,9 +918,12 @@ struct hostap_interface {
918 918
919/* 919/*
920 * TX meta data - stored in skb->cb buffer, so this must not be increased over 920 * TX meta data - stored in skb->cb buffer, so this must not be increased over
921 * the 40-byte limit 921 * the 48-byte limit.
922 * THE PADDING THIS STARTS WITH IS A HORRIBLE HACK THAT SHOULD NOT LIVE
923 * TO SEE THE DAY.
922 */ 924 */
923struct hostap_skb_tx_data { 925struct hostap_skb_tx_data {
926 unsigned int __padding_for_default_qdiscs;
924 u32 magic; /* HOSTAP_SKB_TX_DATA_MAGIC */ 927 u32 magic; /* HOSTAP_SKB_TX_DATA_MAGIC */
925 u8 rate; /* transmit rate */ 928 u8 rate; /* transmit rate */
926#define HOSTAP_TX_FLAGS_WDS BIT(0) 929#define HOSTAP_TX_FLAGS_WDS BIT(0)
diff --git a/drivers/net/wireless/ipw2200.c b/drivers/net/wireless/ipw2200.c
index dcce3542d5a7..7a9f901d4ff6 100644
--- a/drivers/net/wireless/ipw2200.c
+++ b/drivers/net/wireless/ipw2200.c
@@ -3897,6 +3897,7 @@ static int ipw_disassociate(void *data)
3897 if (!(priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING))) 3897 if (!(priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)))
3898 return 0; 3898 return 0;
3899 ipw_send_disassociate(data, 0); 3899 ipw_send_disassociate(data, 0);
3900 netif_carrier_off(priv->net_dev);
3900 return 1; 3901 return 1;
3901} 3902}
3902 3903
@@ -10190,6 +10191,9 @@ static int ipw_tx_skb(struct ipw_priv *priv, struct ieee80211_txb *txb,
10190 u16 remaining_bytes; 10191 u16 remaining_bytes;
10191 int fc; 10192 int fc;
10192 10193
10194 if (!(priv->status & STATUS_ASSOCIATED))
10195 goto drop;
10196
10193 hdr_len = ieee80211_get_hdrlen(le16_to_cpu(hdr->frame_ctl)); 10197 hdr_len = ieee80211_get_hdrlen(le16_to_cpu(hdr->frame_ctl));
10194 switch (priv->ieee->iw_mode) { 10198 switch (priv->ieee->iw_mode) {
10195 case IW_MODE_ADHOC: 10199 case IW_MODE_ADHOC:
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.c b/drivers/net/wireless/iwlwifi/iwl-agn.c
index 24a1aeb6448f..c4c0371c763b 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn.c
@@ -1384,9 +1384,11 @@ void iwl_rx_handle(struct iwl_priv *priv)
1384 1384
1385 rxq->queue[i] = NULL; 1385 rxq->queue[i] = NULL;
1386 1386
1387 pci_dma_sync_single_for_cpu(priv->pci_dev, rxb->dma_addr, 1387 dma_sync_single_range_for_cpu(
1388 priv->hw_params.rx_buf_size, 1388 &priv->pci_dev->dev, rxb->real_dma_addr,
1389 PCI_DMA_FROMDEVICE); 1389 rxb->aligned_dma_addr - rxb->real_dma_addr,
1390 priv->hw_params.rx_buf_size,
1391 PCI_DMA_FROMDEVICE);
1390 pkt = (struct iwl_rx_packet *)rxb->skb->data; 1392 pkt = (struct iwl_rx_packet *)rxb->skb->data;
1391 1393
1392 /* Reclaim a command buffer only if this packet is a response 1394 /* Reclaim a command buffer only if this packet is a response
@@ -1436,8 +1438,8 @@ void iwl_rx_handle(struct iwl_priv *priv)
1436 rxb->skb = NULL; 1438 rxb->skb = NULL;
1437 } 1439 }
1438 1440
1439 pci_unmap_single(priv->pci_dev, rxb->dma_addr, 1441 pci_unmap_single(priv->pci_dev, rxb->real_dma_addr,
1440 priv->hw_params.rx_buf_size, 1442 priv->hw_params.rx_buf_size + 256,
1441 PCI_DMA_FROMDEVICE); 1443 PCI_DMA_FROMDEVICE);
1442 spin_lock_irqsave(&rxq->lock, flags); 1444 spin_lock_irqsave(&rxq->lock, flags);
1443 list_add_tail(&rxb->list, &priv->rxq.rx_used); 1445 list_add_tail(&rxb->list, &priv->rxq.rx_used);
@@ -2090,7 +2092,6 @@ static void iwl_alive_start(struct iwl_priv *priv)
2090 iwl4965_error_recovery(priv); 2092 iwl4965_error_recovery(priv);
2091 2093
2092 iwl_power_update_mode(priv, 1); 2094 iwl_power_update_mode(priv, 1);
2093 ieee80211_notify_mac(priv->hw, IEEE80211_NOTIFY_RE_ASSOC);
2094 2095
2095 if (test_and_clear_bit(STATUS_MODE_PENDING, &priv->status)) 2096 if (test_and_clear_bit(STATUS_MODE_PENDING, &priv->status))
2096 iwl4965_set_mode(priv, priv->iw_mode); 2097 iwl4965_set_mode(priv, priv->iw_mode);
@@ -3252,7 +3253,11 @@ static void iwl4965_mac_update_tkip_key(struct ieee80211_hw *hw,
3252 return; 3253 return;
3253 } 3254 }
3254 3255
3255 iwl_scan_cancel_timeout(priv, 100); 3256 if (iwl_scan_cancel(priv)) {
3257 /* cancel scan failed, just live w/ bad key and rely
3258 briefly on SW decryption */
3259 return;
3260 }
3256 3261
3257 key_flags |= (STA_KEY_FLG_TKIP | STA_KEY_FLG_MAP_KEY_MSK); 3262 key_flags |= (STA_KEY_FLG_TKIP | STA_KEY_FLG_MAP_KEY_MSK);
3258 key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS); 3263 key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS);
diff --git a/drivers/net/wireless/iwlwifi/iwl-core.c b/drivers/net/wireless/iwlwifi/iwl-core.c
index 4c312c55f90c..01a845851338 100644
--- a/drivers/net/wireless/iwlwifi/iwl-core.c
+++ b/drivers/net/wireless/iwlwifi/iwl-core.c
@@ -290,6 +290,9 @@ void iwl_clear_stations_table(struct iwl_priv *priv)
290 priv->num_stations = 0; 290 priv->num_stations = 0;
291 memset(priv->stations, 0, sizeof(priv->stations)); 291 memset(priv->stations, 0, sizeof(priv->stations));
292 292
293 /* clean ucode key table bit map */
294 priv->ucode_key_table = 0;
295
293 spin_unlock_irqrestore(&priv->sta_lock, flags); 296 spin_unlock_irqrestore(&priv->sta_lock, flags);
294} 297}
295EXPORT_SYMBOL(iwl_clear_stations_table); 298EXPORT_SYMBOL(iwl_clear_stations_table);
diff --git a/drivers/net/wireless/iwlwifi/iwl-dev.h b/drivers/net/wireless/iwlwifi/iwl-dev.h
index c018121085e9..9966d4e384ce 100644
--- a/drivers/net/wireless/iwlwifi/iwl-dev.h
+++ b/drivers/net/wireless/iwlwifi/iwl-dev.h
@@ -89,7 +89,8 @@ extern struct iwl_cfg iwl5100_abg_cfg;
89#define DEFAULT_LONG_RETRY_LIMIT 4U 89#define DEFAULT_LONG_RETRY_LIMIT 4U
90 90
91struct iwl_rx_mem_buffer { 91struct iwl_rx_mem_buffer {
92 dma_addr_t dma_addr; 92 dma_addr_t real_dma_addr;
93 dma_addr_t aligned_dma_addr;
93 struct sk_buff *skb; 94 struct sk_buff *skb;
94 struct list_head list; 95 struct list_head list;
95}; 96};
diff --git a/drivers/net/wireless/iwlwifi/iwl-rx.c b/drivers/net/wireless/iwlwifi/iwl-rx.c
index 7cde9d76ff5d..0509c16dbe75 100644
--- a/drivers/net/wireless/iwlwifi/iwl-rx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-rx.c
@@ -204,7 +204,7 @@ int iwl_rx_queue_restock(struct iwl_priv *priv)
204 list_del(element); 204 list_del(element);
205 205
206 /* Point to Rx buffer via next RBD in circular buffer */ 206 /* Point to Rx buffer via next RBD in circular buffer */
207 rxq->bd[rxq->write] = iwl_dma_addr2rbd_ptr(priv, rxb->dma_addr); 207 rxq->bd[rxq->write] = iwl_dma_addr2rbd_ptr(priv, rxb->aligned_dma_addr);
208 rxq->queue[rxq->write] = rxb; 208 rxq->queue[rxq->write] = rxb;
209 rxq->write = (rxq->write + 1) & RX_QUEUE_MASK; 209 rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
210 rxq->free_count--; 210 rxq->free_count--;
@@ -251,7 +251,7 @@ void iwl_rx_allocate(struct iwl_priv *priv)
251 rxb = list_entry(element, struct iwl_rx_mem_buffer, list); 251 rxb = list_entry(element, struct iwl_rx_mem_buffer, list);
252 252
253 /* Alloc a new receive buffer */ 253 /* Alloc a new receive buffer */
254 rxb->skb = alloc_skb(priv->hw_params.rx_buf_size, 254 rxb->skb = alloc_skb(priv->hw_params.rx_buf_size + 256,
255 __GFP_NOWARN | GFP_ATOMIC); 255 __GFP_NOWARN | GFP_ATOMIC);
256 if (!rxb->skb) { 256 if (!rxb->skb) {
257 if (net_ratelimit()) 257 if (net_ratelimit())
@@ -266,9 +266,17 @@ void iwl_rx_allocate(struct iwl_priv *priv)
266 list_del(element); 266 list_del(element);
267 267
268 /* Get physical address of RB/SKB */ 268 /* Get physical address of RB/SKB */
269 rxb->dma_addr = 269 rxb->real_dma_addr = pci_map_single(
270 pci_map_single(priv->pci_dev, rxb->skb->data, 270 priv->pci_dev,
271 priv->hw_params.rx_buf_size, PCI_DMA_FROMDEVICE); 271 rxb->skb->data,
272 priv->hw_params.rx_buf_size + 256,
273 PCI_DMA_FROMDEVICE);
274 /* dma address must be no more than 36 bits */
275 BUG_ON(rxb->real_dma_addr & ~DMA_BIT_MASK(36));
276 /* and also 256 byte aligned! */
277 rxb->aligned_dma_addr = ALIGN(rxb->real_dma_addr, 256);
278 skb_reserve(rxb->skb, rxb->aligned_dma_addr - rxb->real_dma_addr);
279
272 list_add_tail(&rxb->list, &rxq->rx_free); 280 list_add_tail(&rxb->list, &rxq->rx_free);
273 rxq->free_count++; 281 rxq->free_count++;
274 } 282 }
@@ -300,8 +308,8 @@ void iwl_rx_queue_free(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
300 for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) { 308 for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) {
301 if (rxq->pool[i].skb != NULL) { 309 if (rxq->pool[i].skb != NULL) {
302 pci_unmap_single(priv->pci_dev, 310 pci_unmap_single(priv->pci_dev,
303 rxq->pool[i].dma_addr, 311 rxq->pool[i].real_dma_addr,
304 priv->hw_params.rx_buf_size, 312 priv->hw_params.rx_buf_size + 256,
305 PCI_DMA_FROMDEVICE); 313 PCI_DMA_FROMDEVICE);
306 dev_kfree_skb(rxq->pool[i].skb); 314 dev_kfree_skb(rxq->pool[i].skb);
307 } 315 }
@@ -354,8 +362,8 @@ void iwl_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
354 * to an SKB, so we need to unmap and free potential storage */ 362 * to an SKB, so we need to unmap and free potential storage */
355 if (rxq->pool[i].skb != NULL) { 363 if (rxq->pool[i].skb != NULL) {
356 pci_unmap_single(priv->pci_dev, 364 pci_unmap_single(priv->pci_dev,
357 rxq->pool[i].dma_addr, 365 rxq->pool[i].real_dma_addr,
358 priv->hw_params.rx_buf_size, 366 priv->hw_params.rx_buf_size + 256,
359 PCI_DMA_FROMDEVICE); 367 PCI_DMA_FROMDEVICE);
360 priv->alloc_rxb_skb--; 368 priv->alloc_rxb_skb--;
361 dev_kfree_skb(rxq->pool[i].skb); 369 dev_kfree_skb(rxq->pool[i].skb);
diff --git a/drivers/net/wireless/iwlwifi/iwl-scan.c b/drivers/net/wireless/iwlwifi/iwl-scan.c
index 3b0bee331a33..c89365e2ca58 100644
--- a/drivers/net/wireless/iwlwifi/iwl-scan.c
+++ b/drivers/net/wireless/iwlwifi/iwl-scan.c
@@ -896,6 +896,13 @@ static void iwl_bg_request_scan(struct work_struct *data)
896 return; 896 return;
897 897
898 done: 898 done:
899 /* Cannot perform scan. Make sure we clear scanning
900 * bits from status so next scan request can be performed.
901 * If we don't clear scanning status bit here all next scan
902 * will fail
903 */
904 clear_bit(STATUS_SCAN_HW, &priv->status);
905 clear_bit(STATUS_SCANNING, &priv->status);
899 /* inform mac80211 scan aborted */ 906 /* inform mac80211 scan aborted */
900 queue_work(priv->workqueue, &priv->scan_completed); 907 queue_work(priv->workqueue, &priv->scan_completed);
901 mutex_unlock(&priv->mutex); 908 mutex_unlock(&priv->mutex);
diff --git a/drivers/net/wireless/iwlwifi/iwl-sta.c b/drivers/net/wireless/iwlwifi/iwl-sta.c
index 61797f3f8d5c..26f7084d3011 100644
--- a/drivers/net/wireless/iwlwifi/iwl-sta.c
+++ b/drivers/net/wireless/iwlwifi/iwl-sta.c
@@ -475,7 +475,7 @@ static int iwl_get_free_ucode_key_index(struct iwl_priv *priv)
475 if (!test_and_set_bit(i, &priv->ucode_key_table)) 475 if (!test_and_set_bit(i, &priv->ucode_key_table))
476 return i; 476 return i;
477 477
478 return -1; 478 return WEP_INVALID_OFFSET;
479} 479}
480 480
481int iwl_send_static_wepkey_cmd(struct iwl_priv *priv, u8 send_if_empty) 481int iwl_send_static_wepkey_cmd(struct iwl_priv *priv, u8 send_if_empty)
@@ -620,6 +620,9 @@ static int iwl_set_wep_dynamic_key_info(struct iwl_priv *priv,
620 /* else, we are overriding an existing key => no need to allocated room 620 /* else, we are overriding an existing key => no need to allocated room
621 * in uCode. */ 621 * in uCode. */
622 622
623 WARN(priv->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET,
624 "no space for new kew");
625
623 priv->stations[sta_id].sta.key.key_flags = key_flags; 626 priv->stations[sta_id].sta.key.key_flags = key_flags;
624 priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK; 627 priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
625 priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK; 628 priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
@@ -637,6 +640,7 @@ static int iwl_set_ccmp_dynamic_key_info(struct iwl_priv *priv,
637{ 640{
638 unsigned long flags; 641 unsigned long flags;
639 __le16 key_flags = 0; 642 __le16 key_flags = 0;
643 int ret;
640 644
641 key_flags |= (STA_KEY_FLG_CCMP | STA_KEY_FLG_MAP_KEY_MSK); 645 key_flags |= (STA_KEY_FLG_CCMP | STA_KEY_FLG_MAP_KEY_MSK);
642 key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS); 646 key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS);
@@ -664,14 +668,18 @@ static int iwl_set_ccmp_dynamic_key_info(struct iwl_priv *priv,
664 /* else, we are overriding an existing key => no need to allocated room 668 /* else, we are overriding an existing key => no need to allocated room
665 * in uCode. */ 669 * in uCode. */
666 670
671 WARN(priv->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET,
672 "no space for new kew");
673
667 priv->stations[sta_id].sta.key.key_flags = key_flags; 674 priv->stations[sta_id].sta.key.key_flags = key_flags;
668 priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK; 675 priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
669 priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK; 676 priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
670 677
678 ret = iwl_send_add_sta(priv, &priv->stations[sta_id].sta, CMD_ASYNC);
679
671 spin_unlock_irqrestore(&priv->sta_lock, flags); 680 spin_unlock_irqrestore(&priv->sta_lock, flags);
672 681
673 IWL_DEBUG_INFO("hwcrypto: modify ucode station key info\n"); 682 return ret;
674 return iwl_send_add_sta(priv, &priv->stations[sta_id].sta, CMD_ASYNC);
675} 683}
676 684
677static int iwl_set_tkip_dynamic_key_info(struct iwl_priv *priv, 685static int iwl_set_tkip_dynamic_key_info(struct iwl_priv *priv,
@@ -696,6 +704,9 @@ static int iwl_set_tkip_dynamic_key_info(struct iwl_priv *priv,
696 /* else, we are overriding an existing key => no need to allocated room 704 /* else, we are overriding an existing key => no need to allocated room
697 * in uCode. */ 705 * in uCode. */
698 706
707 WARN(priv->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET,
708 "no space for new kew");
709
699 /* This copy is acutally not needed: we get the key with each TX */ 710 /* This copy is acutally not needed: we get the key with each TX */
700 memcpy(priv->stations[sta_id].keyinfo.key, keyconf->key, 16); 711 memcpy(priv->stations[sta_id].keyinfo.key, keyconf->key, 16);
701 712
@@ -734,6 +745,13 @@ int iwl_remove_dynamic_key(struct iwl_priv *priv,
734 return 0; 745 return 0;
735 } 746 }
736 747
748 if (priv->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET) {
749 IWL_WARNING("Removing wrong key %d 0x%x\n",
750 keyconf->keyidx, key_flags);
751 spin_unlock_irqrestore(&priv->sta_lock, flags);
752 return 0;
753 }
754
737 if (!test_and_clear_bit(priv->stations[sta_id].sta.key.key_offset, 755 if (!test_and_clear_bit(priv->stations[sta_id].sta.key.key_offset,
738 &priv->ucode_key_table)) 756 &priv->ucode_key_table))
739 IWL_ERROR("index %d not used in uCode key table.\n", 757 IWL_ERROR("index %d not used in uCode key table.\n",
diff --git a/drivers/net/wireless/iwlwifi/iwl3945-base.c b/drivers/net/wireless/iwlwifi/iwl3945-base.c
index d15a2c997954..45a6b0c35695 100644
--- a/drivers/net/wireless/iwlwifi/iwl3945-base.c
+++ b/drivers/net/wireless/iwlwifi/iwl3945-base.c
@@ -5768,7 +5768,6 @@ static void iwl3945_alive_start(struct iwl3945_priv *priv)
5768 if (priv->error_recovering) 5768 if (priv->error_recovering)
5769 iwl3945_error_recovery(priv); 5769 iwl3945_error_recovery(priv);
5770 5770
5771 ieee80211_notify_mac(priv->hw, IEEE80211_NOTIFY_RE_ASSOC);
5772 return; 5771 return;
5773 5772
5774 restart: 5773 restart:
@@ -6256,6 +6255,11 @@ static void iwl3945_bg_request_scan(struct work_struct *data)
6256 n_probes, 6255 n_probes,
6257 (void *)&scan->data[le16_to_cpu(scan->tx_cmd.len)]); 6256 (void *)&scan->data[le16_to_cpu(scan->tx_cmd.len)]);
6258 6257
6258 if (scan->channel_count == 0) {
6259 IWL_DEBUG_SCAN("channel count %d\n", scan->channel_count);
6260 goto done;
6261 }
6262
6259 cmd.len += le16_to_cpu(scan->tx_cmd.len) + 6263 cmd.len += le16_to_cpu(scan->tx_cmd.len) +
6260 scan->channel_count * sizeof(struct iwl3945_scan_channel); 6264 scan->channel_count * sizeof(struct iwl3945_scan_channel);
6261 cmd.data = scan; 6265 cmd.data = scan;
@@ -6273,6 +6277,14 @@ static void iwl3945_bg_request_scan(struct work_struct *data)
6273 return; 6277 return;
6274 6278
6275 done: 6279 done:
6280 /* can not perform scan make sure we clear scanning
6281 * bits from status so next scan request can be performed.
6282 * if we dont clear scanning status bit here all next scan
6283 * will fail
6284 */
6285 clear_bit(STATUS_SCAN_HW, &priv->status);
6286 clear_bit(STATUS_SCANNING, &priv->status);
6287
6276 /* inform mac80211 scan aborted */ 6288 /* inform mac80211 scan aborted */
6277 queue_work(priv->workqueue, &priv->scan_completed); 6289 queue_work(priv->workqueue, &priv->scan_completed);
6278 mutex_unlock(&priv->mutex); 6290 mutex_unlock(&priv->mutex);
diff --git a/drivers/net/wireless/libertas/cmd.c b/drivers/net/wireless/libertas/cmd.c
index 297696de2da0..8265c7d25edc 100644
--- a/drivers/net/wireless/libertas/cmd.c
+++ b/drivers/net/wireless/libertas/cmd.c
@@ -605,9 +605,9 @@ int lbs_get_tx_power(struct lbs_private *priv, s16 *curlevel, s16 *minlevel,
605 if (ret == 0) { 605 if (ret == 0) {
606 *curlevel = le16_to_cpu(cmd.curlevel); 606 *curlevel = le16_to_cpu(cmd.curlevel);
607 if (minlevel) 607 if (minlevel)
608 *minlevel = le16_to_cpu(cmd.minlevel); 608 *minlevel = cmd.minlevel;
609 if (maxlevel) 609 if (maxlevel)
610 *maxlevel = le16_to_cpu(cmd.maxlevel); 610 *maxlevel = cmd.maxlevel;
611 } 611 }
612 612
613 lbs_deb_leave(LBS_DEB_CMD); 613 lbs_deb_leave(LBS_DEB_CMD);
diff --git a/drivers/net/wireless/libertas/rx.c b/drivers/net/wireless/libertas/rx.c
index 5749f22b296f..079e6aa874dc 100644
--- a/drivers/net/wireless/libertas/rx.c
+++ b/drivers/net/wireless/libertas/rx.c
@@ -328,7 +328,7 @@ static int process_rxed_802_11_packet(struct lbs_private *priv,
328 lbs_deb_rx("rx err: frame received with bad length\n"); 328 lbs_deb_rx("rx err: frame received with bad length\n");
329 priv->stats.rx_length_errors++; 329 priv->stats.rx_length_errors++;
330 ret = -EINVAL; 330 ret = -EINVAL;
331 kfree(skb); 331 kfree_skb(skb);
332 goto done; 332 goto done;
333 } 333 }
334 334
diff --git a/drivers/net/wireless/libertas/scan.c b/drivers/net/wireless/libertas/scan.c
index 8f66903641b9..22c4c6110521 100644
--- a/drivers/net/wireless/libertas/scan.c
+++ b/drivers/net/wireless/libertas/scan.c
@@ -598,8 +598,8 @@ static int lbs_process_bss(struct bss_descriptor *bss,
598 598
599 switch (elem->id) { 599 switch (elem->id) {
600 case MFIE_TYPE_SSID: 600 case MFIE_TYPE_SSID:
601 bss->ssid_len = elem->len; 601 bss->ssid_len = min_t(int, 32, elem->len);
602 memcpy(bss->ssid, elem->data, elem->len); 602 memcpy(bss->ssid, elem->data, bss->ssid_len);
603 lbs_deb_scan("got SSID IE: '%s', len %u\n", 603 lbs_deb_scan("got SSID IE: '%s', len %u\n",
604 escape_essid(bss->ssid, bss->ssid_len), 604 escape_essid(bss->ssid, bss->ssid_len),
605 bss->ssid_len); 605 bss->ssid_len);
diff --git a/drivers/net/wireless/libertas_tf/if_usb.c b/drivers/net/wireless/libertas_tf/if_usb.c
index 1cc03a8dd67a..59634c33b1f9 100644
--- a/drivers/net/wireless/libertas_tf/if_usb.c
+++ b/drivers/net/wireless/libertas_tf/if_usb.c
@@ -331,7 +331,7 @@ static int __if_usb_submit_rx_urb(struct if_usb_card *cardp,
331 /* Fill the receive configuration URB and initialise the Rx call back */ 331 /* Fill the receive configuration URB and initialise the Rx call back */
332 usb_fill_bulk_urb(cardp->rx_urb, cardp->udev, 332 usb_fill_bulk_urb(cardp->rx_urb, cardp->udev,
333 usb_rcvbulkpipe(cardp->udev, cardp->ep_in), 333 usb_rcvbulkpipe(cardp->udev, cardp->ep_in),
334 (void *) (skb->tail), 334 skb_tail_pointer(skb),
335 MRVDRV_ETH_RX_PACKET_BUFFER_SIZE, callbackfn, cardp); 335 MRVDRV_ETH_RX_PACKET_BUFFER_SIZE, callbackfn, cardp);
336 336
337 cardp->rx_urb->transfer_flags |= URB_ZERO_PACKET; 337 cardp->rx_urb->transfer_flags |= URB_ZERO_PACKET;
diff --git a/drivers/net/wireless/orinoco.c b/drivers/net/wireless/orinoco.c
index 50904771f291..e0512e49d6d3 100644
--- a/drivers/net/wireless/orinoco.c
+++ b/drivers/net/wireless/orinoco.c
@@ -433,7 +433,7 @@ struct fw_info {
433const static struct fw_info orinoco_fw[] = { 433const static struct fw_info orinoco_fw[] = {
434 { "", "agere_sta_fw.bin", "agere_ap_fw.bin", 0x00390000, 1000 }, 434 { "", "agere_sta_fw.bin", "agere_ap_fw.bin", 0x00390000, 1000 },
435 { "", "prism_sta_fw.bin", "prism_ap_fw.bin", 0, 1024 }, 435 { "", "prism_sta_fw.bin", "prism_ap_fw.bin", 0, 1024 },
436 { "symbol_sp24t_prim_fw", "symbol_sp24t_sec_fw", "", 0x00003100, 0x100 } 436 { "symbol_sp24t_prim_fw", "symbol_sp24t_sec_fw", "", 0x00003100, 512 }
437}; 437};
438 438
439/* Structure used to access fields in FW 439/* Structure used to access fields in FW
@@ -458,7 +458,7 @@ orinoco_dl_firmware(struct orinoco_private *priv,
458 int ap) 458 int ap)
459{ 459{
460 /* Plug Data Area (PDA) */ 460 /* Plug Data Area (PDA) */
461 __le16 pda[512] = { 0 }; 461 __le16 *pda;
462 462
463 hermes_t *hw = &priv->hw; 463 hermes_t *hw = &priv->hw;
464 const struct firmware *fw_entry; 464 const struct firmware *fw_entry;
@@ -467,7 +467,11 @@ orinoco_dl_firmware(struct orinoco_private *priv,
467 const unsigned char *end; 467 const unsigned char *end;
468 const char *firmware; 468 const char *firmware;
469 struct net_device *dev = priv->ndev; 469 struct net_device *dev = priv->ndev;
470 int err; 470 int err = 0;
471
472 pda = kzalloc(fw->pda_size, GFP_KERNEL);
473 if (!pda)
474 return -ENOMEM;
471 475
472 if (ap) 476 if (ap)
473 firmware = fw->ap_fw; 477 firmware = fw->ap_fw;
@@ -478,17 +482,17 @@ orinoco_dl_firmware(struct orinoco_private *priv,
478 dev->name, firmware); 482 dev->name, firmware);
479 483
480 /* Read current plug data */ 484 /* Read current plug data */
481 err = hermes_read_pda(hw, pda, fw->pda_addr, 485 err = hermes_read_pda(hw, pda, fw->pda_addr, fw->pda_size, 0);
482 min_t(u16, fw->pda_size, sizeof(pda)), 0);
483 printk(KERN_DEBUG "%s: Read PDA returned %d\n", dev->name, err); 486 printk(KERN_DEBUG "%s: Read PDA returned %d\n", dev->name, err);
484 if (err) 487 if (err)
485 return err; 488 goto free;
486 489
487 err = request_firmware(&fw_entry, firmware, priv->dev); 490 err = request_firmware(&fw_entry, firmware, priv->dev);
488 if (err) { 491 if (err) {
489 printk(KERN_ERR "%s: Cannot find firmware %s\n", 492 printk(KERN_ERR "%s: Cannot find firmware %s\n",
490 dev->name, firmware); 493 dev->name, firmware);
491 return -ENOENT; 494 err = -ENOENT;
495 goto free;
492 } 496 }
493 497
494 hdr = (const struct orinoco_fw_header *) fw_entry->data; 498 hdr = (const struct orinoco_fw_header *) fw_entry->data;
@@ -532,6 +536,9 @@ orinoco_dl_firmware(struct orinoco_private *priv,
532 536
533abort: 537abort:
534 release_firmware(fw_entry); 538 release_firmware(fw_entry);
539
540free:
541 kfree(pda);
535 return err; 542 return err;
536} 543}
537 544
@@ -549,12 +556,12 @@ symbol_dl_image(struct orinoco_private *priv, const struct fw_info *fw,
549 int secondary) 556 int secondary)
550{ 557{
551 hermes_t *hw = &priv->hw; 558 hermes_t *hw = &priv->hw;
552 int ret; 559 int ret = 0;
553 const unsigned char *ptr; 560 const unsigned char *ptr;
554 const unsigned char *first_block; 561 const unsigned char *first_block;
555 562
556 /* Plug Data Area (PDA) */ 563 /* Plug Data Area (PDA) */
557 __le16 pda[256]; 564 __le16 *pda = NULL;
558 565
559 /* Binary block begins after the 0x1A marker */ 566 /* Binary block begins after the 0x1A marker */
560 ptr = image; 567 ptr = image;
@@ -563,28 +570,33 @@ symbol_dl_image(struct orinoco_private *priv, const struct fw_info *fw,
563 570
564 /* Read the PDA from EEPROM */ 571 /* Read the PDA from EEPROM */
565 if (secondary) { 572 if (secondary) {
566 ret = hermes_read_pda(hw, pda, fw->pda_addr, sizeof(pda), 1); 573 pda = kzalloc(fw->pda_size, GFP_KERNEL);
574 if (!pda)
575 return -ENOMEM;
576
577 ret = hermes_read_pda(hw, pda, fw->pda_addr, fw->pda_size, 1);
567 if (ret) 578 if (ret)
568 return ret; 579 goto free;
569 } 580 }
570 581
571 /* Stop the firmware, so that it can be safely rewritten */ 582 /* Stop the firmware, so that it can be safely rewritten */
572 if (priv->stop_fw) { 583 if (priv->stop_fw) {
573 ret = priv->stop_fw(priv, 1); 584 ret = priv->stop_fw(priv, 1);
574 if (ret) 585 if (ret)
575 return ret; 586 goto free;
576 } 587 }
577 588
578 /* Program the adapter with new firmware */ 589 /* Program the adapter with new firmware */
579 ret = hermes_program(hw, first_block, end); 590 ret = hermes_program(hw, first_block, end);
580 if (ret) 591 if (ret)
581 return ret; 592 goto free;
582 593
583 /* Write the PDA to the adapter */ 594 /* Write the PDA to the adapter */
584 if (secondary) { 595 if (secondary) {
585 size_t len = hermes_blocks_length(first_block); 596 size_t len = hermes_blocks_length(first_block);
586 ptr = first_block + len; 597 ptr = first_block + len;
587 ret = hermes_apply_pda(hw, ptr, pda); 598 ret = hermes_apply_pda(hw, ptr, pda);
599 kfree(pda);
588 if (ret) 600 if (ret)
589 return ret; 601 return ret;
590 } 602 }
@@ -608,6 +620,10 @@ symbol_dl_image(struct orinoco_private *priv, const struct fw_info *fw,
608 return -ENODEV; 620 return -ENODEV;
609 621
610 return 0; 622 return 0;
623
624free:
625 kfree(pda);
626 return ret;
611} 627}
612 628
613 629
diff --git a/drivers/net/wireless/p54/p54common.c b/drivers/net/wireless/p54/p54common.c
index 117c7d3a52b0..827ca0384a4c 100644
--- a/drivers/net/wireless/p54/p54common.c
+++ b/drivers/net/wireless/p54/p54common.c
@@ -306,8 +306,8 @@ static int p54_convert_rev1(struct ieee80211_hw *dev,
306 return 0; 306 return 0;
307} 307}
308 308
309static const char *p54_rf_chips[] = { "NULL", "Indigo?", "Duette", 309static const char *p54_rf_chips[] = { "NULL", "Duette3", "Duette2",
310 "Frisbee", "Xbow", "Longbow" }; 310 "Frisbee", "Xbow", "Longbow", "NULL", "NULL" };
311static int p54_init_xbow_synth(struct ieee80211_hw *dev); 311static int p54_init_xbow_synth(struct ieee80211_hw *dev);
312 312
313static int p54_parse_eeprom(struct ieee80211_hw *dev, void *eeprom, int len) 313static int p54_parse_eeprom(struct ieee80211_hw *dev, void *eeprom, int len)
@@ -319,6 +319,7 @@ static int p54_parse_eeprom(struct ieee80211_hw *dev, void *eeprom, int len)
319 void *tmp; 319 void *tmp;
320 int err; 320 int err;
321 u8 *end = (u8 *)eeprom + len; 321 u8 *end = (u8 *)eeprom + len;
322 u16 synth = 0;
322 DECLARE_MAC_BUF(mac); 323 DECLARE_MAC_BUF(mac);
323 324
324 wrap = (struct eeprom_pda_wrap *) eeprom; 325 wrap = (struct eeprom_pda_wrap *) eeprom;
@@ -400,8 +401,8 @@ static int p54_parse_eeprom(struct ieee80211_hw *dev, void *eeprom, int len)
400 tmp = entry->data; 401 tmp = entry->data;
401 while ((u8 *)tmp < entry->data + data_len) { 402 while ((u8 *)tmp < entry->data + data_len) {
402 struct bootrec_exp_if *exp_if = tmp; 403 struct bootrec_exp_if *exp_if = tmp;
403 if (le16_to_cpu(exp_if->if_id) == 0xF) 404 if (le16_to_cpu(exp_if->if_id) == 0xf)
404 priv->rxhw = le16_to_cpu(exp_if->variant) & 0x07; 405 synth = le16_to_cpu(exp_if->variant);
405 tmp += sizeof(struct bootrec_exp_if); 406 tmp += sizeof(struct bootrec_exp_if);
406 } 407 }
407 break; 408 break;
@@ -421,28 +422,20 @@ static int p54_parse_eeprom(struct ieee80211_hw *dev, void *eeprom, int len)
421 entry = (void *)entry + (entry_len + 1)*2; 422 entry = (void *)entry + (entry_len + 1)*2;
422 } 423 }
423 424
424 if (!priv->iq_autocal || !priv->output_limit || !priv->curve_data) { 425 if (!synth || !priv->iq_autocal || !priv->output_limit ||
426 !priv->curve_data) {
425 printk(KERN_ERR "p54: not all required entries found in eeprom!\n"); 427 printk(KERN_ERR "p54: not all required entries found in eeprom!\n");
426 err = -EINVAL; 428 err = -EINVAL;
427 goto err; 429 goto err;
428 } 430 }
429 431
430 switch (priv->rxhw) { 432 priv->rxhw = synth & 0x07;
431 case 4: /* XBow */ 433 if (priv->rxhw == 4)
432 p54_init_xbow_synth(dev); 434 p54_init_xbow_synth(dev);
433 case 1: /* Indigo? */ 435 if (!(synth & 0x40))
434 case 2: /* Duette */
435 dev->wiphy->bands[IEEE80211_BAND_5GHZ] = &band_5GHz;
436 case 3: /* Frisbee */
437 case 5: /* Longbow */
438 dev->wiphy->bands[IEEE80211_BAND_2GHZ] = &band_2GHz; 436 dev->wiphy->bands[IEEE80211_BAND_2GHZ] = &band_2GHz;
439 break; 437 if (!(synth & 0x80))
440 default: 438 dev->wiphy->bands[IEEE80211_BAND_5GHZ] = &band_5GHz;
441 printk(KERN_ERR "%s: unsupported RF-Chip\n",
442 wiphy_name(dev->wiphy));
443 err = -EINVAL;
444 goto err;
445 }
446 439
447 if (!is_valid_ether_addr(dev->wiphy->perm_addr)) { 440 if (!is_valid_ether_addr(dev->wiphy->perm_addr)) {
448 u8 perm_addr[ETH_ALEN]; 441 u8 perm_addr[ETH_ALEN];
diff --git a/drivers/net/wireless/p54/p54pci.c b/drivers/net/wireless/p54/p54pci.c
index 1c2a02a741af..88b3cad8b65e 100644
--- a/drivers/net/wireless/p54/p54pci.c
+++ b/drivers/net/wireless/p54/p54pci.c
@@ -346,68 +346,6 @@ static void p54p_tx(struct ieee80211_hw *dev, struct p54_control_hdr *data,
346 printk(KERN_INFO "%s: tx overflow.\n", wiphy_name(dev->wiphy)); 346 printk(KERN_INFO "%s: tx overflow.\n", wiphy_name(dev->wiphy));
347} 347}
348 348
349static int p54p_open(struct ieee80211_hw *dev)
350{
351 struct p54p_priv *priv = dev->priv;
352 int err;
353
354 init_completion(&priv->boot_comp);
355 err = request_irq(priv->pdev->irq, &p54p_interrupt,
356 IRQF_SHARED, "p54pci", dev);
357 if (err) {
358 printk(KERN_ERR "%s: failed to register IRQ handler\n",
359 wiphy_name(dev->wiphy));
360 return err;
361 }
362
363 memset(priv->ring_control, 0, sizeof(*priv->ring_control));
364 err = p54p_upload_firmware(dev);
365 if (err) {
366 free_irq(priv->pdev->irq, dev);
367 return err;
368 }
369 priv->rx_idx_data = priv->tx_idx_data = 0;
370 priv->rx_idx_mgmt = priv->tx_idx_mgmt = 0;
371
372 p54p_refill_rx_ring(dev, 0, priv->ring_control->rx_data,
373 ARRAY_SIZE(priv->ring_control->rx_data), priv->rx_buf_data);
374
375 p54p_refill_rx_ring(dev, 2, priv->ring_control->rx_mgmt,
376 ARRAY_SIZE(priv->ring_control->rx_mgmt), priv->rx_buf_mgmt);
377
378 P54P_WRITE(ring_control_base, cpu_to_le32(priv->ring_control_dma));
379 P54P_READ(ring_control_base);
380 wmb();
381 udelay(10);
382
383 P54P_WRITE(int_enable, cpu_to_le32(ISL38XX_INT_IDENT_INIT));
384 P54P_READ(int_enable);
385 wmb();
386 udelay(10);
387
388 P54P_WRITE(dev_int, cpu_to_le32(ISL38XX_DEV_INT_RESET));
389 P54P_READ(dev_int);
390
391 if (!wait_for_completion_interruptible_timeout(&priv->boot_comp, HZ)) {
392 printk(KERN_ERR "%s: Cannot boot firmware!\n",
393 wiphy_name(dev->wiphy));
394 free_irq(priv->pdev->irq, dev);
395 return -ETIMEDOUT;
396 }
397
398 P54P_WRITE(int_enable, cpu_to_le32(ISL38XX_INT_IDENT_UPDATE));
399 P54P_READ(int_enable);
400 wmb();
401 udelay(10);
402
403 P54P_WRITE(dev_int, cpu_to_le32(ISL38XX_DEV_INT_UPDATE));
404 P54P_READ(dev_int);
405 wmb();
406 udelay(10);
407
408 return 0;
409}
410
411static void p54p_stop(struct ieee80211_hw *dev) 349static void p54p_stop(struct ieee80211_hw *dev)
412{ 350{
413 struct p54p_priv *priv = dev->priv; 351 struct p54p_priv *priv = dev->priv;
@@ -474,6 +412,68 @@ static void p54p_stop(struct ieee80211_hw *dev)
474 memset(ring_control, 0, sizeof(*ring_control)); 412 memset(ring_control, 0, sizeof(*ring_control));
475} 413}
476 414
415static int p54p_open(struct ieee80211_hw *dev)
416{
417 struct p54p_priv *priv = dev->priv;
418 int err;
419
420 init_completion(&priv->boot_comp);
421 err = request_irq(priv->pdev->irq, &p54p_interrupt,
422 IRQF_SHARED, "p54pci", dev);
423 if (err) {
424 printk(KERN_ERR "%s: failed to register IRQ handler\n",
425 wiphy_name(dev->wiphy));
426 return err;
427 }
428
429 memset(priv->ring_control, 0, sizeof(*priv->ring_control));
430 err = p54p_upload_firmware(dev);
431 if (err) {
432 free_irq(priv->pdev->irq, dev);
433 return err;
434 }
435 priv->rx_idx_data = priv->tx_idx_data = 0;
436 priv->rx_idx_mgmt = priv->tx_idx_mgmt = 0;
437
438 p54p_refill_rx_ring(dev, 0, priv->ring_control->rx_data,
439 ARRAY_SIZE(priv->ring_control->rx_data), priv->rx_buf_data);
440
441 p54p_refill_rx_ring(dev, 2, priv->ring_control->rx_mgmt,
442 ARRAY_SIZE(priv->ring_control->rx_mgmt), priv->rx_buf_mgmt);
443
444 P54P_WRITE(ring_control_base, cpu_to_le32(priv->ring_control_dma));
445 P54P_READ(ring_control_base);
446 wmb();
447 udelay(10);
448
449 P54P_WRITE(int_enable, cpu_to_le32(ISL38XX_INT_IDENT_INIT));
450 P54P_READ(int_enable);
451 wmb();
452 udelay(10);
453
454 P54P_WRITE(dev_int, cpu_to_le32(ISL38XX_DEV_INT_RESET));
455 P54P_READ(dev_int);
456
457 if (!wait_for_completion_interruptible_timeout(&priv->boot_comp, HZ)) {
458 printk(KERN_ERR "%s: Cannot boot firmware!\n",
459 wiphy_name(dev->wiphy));
460 p54p_stop(dev);
461 return -ETIMEDOUT;
462 }
463
464 P54P_WRITE(int_enable, cpu_to_le32(ISL38XX_INT_IDENT_UPDATE));
465 P54P_READ(int_enable);
466 wmb();
467 udelay(10);
468
469 P54P_WRITE(dev_int, cpu_to_le32(ISL38XX_DEV_INT_UPDATE));
470 P54P_READ(dev_int);
471 wmb();
472 udelay(10);
473
474 return 0;
475}
476
477static int __devinit p54p_probe(struct pci_dev *pdev, 477static int __devinit p54p_probe(struct pci_dev *pdev,
478 const struct pci_device_id *id) 478 const struct pci_device_id *id)
479{ 479{
@@ -556,11 +556,13 @@ static int __devinit p54p_probe(struct pci_dev *pdev,
556 spin_lock_init(&priv->lock); 556 spin_lock_init(&priv->lock);
557 tasklet_init(&priv->rx_tasklet, p54p_rx_tasklet, (unsigned long)dev); 557 tasklet_init(&priv->rx_tasklet, p54p_rx_tasklet, (unsigned long)dev);
558 558
559 p54p_open(dev); 559 err = p54p_open(dev);
560 if (err)
561 goto err_free_common;
560 err = p54_read_eeprom(dev); 562 err = p54_read_eeprom(dev);
561 p54p_stop(dev); 563 p54p_stop(dev);
562 if (err) 564 if (err)
563 goto err_free_desc; 565 goto err_free_common;
564 566
565 err = ieee80211_register_hw(dev); 567 err = ieee80211_register_hw(dev);
566 if (err) { 568 if (err) {
@@ -573,8 +575,6 @@ static int __devinit p54p_probe(struct pci_dev *pdev,
573 575
574 err_free_common: 576 err_free_common:
575 p54_free_common(dev); 577 p54_free_common(dev);
576
577 err_free_desc:
578 pci_free_consistent(pdev, sizeof(*priv->ring_control), 578 pci_free_consistent(pdev, sizeof(*priv->ring_control),
579 priv->ring_control, priv->ring_control_dma); 579 priv->ring_control, priv->ring_control_dma);
580 580
diff --git a/drivers/net/wireless/rt2x00/Kconfig b/drivers/net/wireless/rt2x00/Kconfig
index f839ce044afd..95511ac22470 100644
--- a/drivers/net/wireless/rt2x00/Kconfig
+++ b/drivers/net/wireless/rt2x00/Kconfig
@@ -1,5 +1,5 @@
1menuconfig RT2X00 1menuconfig RT2X00
2 bool "Ralink driver support" 2 tristate "Ralink driver support"
3 depends on MAC80211 && WLAN_80211 && EXPERIMENTAL 3 depends on MAC80211 && WLAN_80211 && EXPERIMENTAL
4 ---help--- 4 ---help---
5 This will enable the experimental support for the Ralink drivers, 5 This will enable the experimental support for the Ralink drivers,
diff --git a/drivers/net/wireless/rtl8187_dev.c b/drivers/net/wireless/rtl8187_dev.c
index 431e3c78bf27..69eb0132593b 100644
--- a/drivers/net/wireless/rtl8187_dev.c
+++ b/drivers/net/wireless/rtl8187_dev.c
@@ -48,6 +48,9 @@ static struct usb_device_id rtl8187_table[] __devinitdata = {
48 {USB_DEVICE(0x03f0, 0xca02), .driver_info = DEVICE_RTL8187}, 48 {USB_DEVICE(0x03f0, 0xca02), .driver_info = DEVICE_RTL8187},
49 /* Sitecom */ 49 /* Sitecom */
50 {USB_DEVICE(0x0df6, 0x000d), .driver_info = DEVICE_RTL8187}, 50 {USB_DEVICE(0x0df6, 0x000d), .driver_info = DEVICE_RTL8187},
51 {USB_DEVICE(0x0df6, 0x0028), .driver_info = DEVICE_RTL8187B},
52 /* Abocom */
53 {USB_DEVICE(0x13d1, 0xabe6), .driver_info = DEVICE_RTL8187},
51 {} 54 {}
52}; 55};
53 56
diff --git a/drivers/net/wireless/zd1211rw/zd_mac.c b/drivers/net/wireless/zd1211rw/zd_mac.c
index fe1867b25ff7..cac732f4047f 100644
--- a/drivers/net/wireless/zd1211rw/zd_mac.c
+++ b/drivers/net/wireless/zd1211rw/zd_mac.c
@@ -615,7 +615,7 @@ static int filter_ack(struct ieee80211_hw *hw, struct ieee80211_hdr *rx_hdr,
615 struct ieee80211_hdr *tx_hdr; 615 struct ieee80211_hdr *tx_hdr;
616 616
617 tx_hdr = (struct ieee80211_hdr *)skb->data; 617 tx_hdr = (struct ieee80211_hdr *)skb->data;
618 if (likely(!compare_ether_addr(tx_hdr->addr2, rx_hdr->addr1))) 618 if (likely(!memcmp(tx_hdr->addr2, rx_hdr->addr1, ETH_ALEN)))
619 { 619 {
620 __skb_unlink(skb, q); 620 __skb_unlink(skb, q);
621 tx_status(hw, skb, IEEE80211_TX_STAT_ACK, stats->signal, 1); 621 tx_status(hw, skb, IEEE80211_TX_STAT_ACK, stats->signal, 1);
diff --git a/drivers/net/wireless/zd1211rw/zd_usb.c b/drivers/net/wireless/zd1211rw/zd_usb.c
index a60ae86bd5c9..a3ccd8c1c716 100644
--- a/drivers/net/wireless/zd1211rw/zd_usb.c
+++ b/drivers/net/wireless/zd1211rw/zd_usb.c
@@ -61,6 +61,7 @@ static struct usb_device_id usb_ids[] = {
61 { USB_DEVICE(0x0105, 0x145f), .driver_info = DEVICE_ZD1211 }, 61 { USB_DEVICE(0x0105, 0x145f), .driver_info = DEVICE_ZD1211 },
62 /* ZD1211B */ 62 /* ZD1211B */
63 { USB_DEVICE(0x0ace, 0x1215), .driver_info = DEVICE_ZD1211B }, 63 { USB_DEVICE(0x0ace, 0x1215), .driver_info = DEVICE_ZD1211B },
64 { USB_DEVICE(0x0ace, 0xb215), .driver_info = DEVICE_ZD1211B },
64 { USB_DEVICE(0x157e, 0x300d), .driver_info = DEVICE_ZD1211B }, 65 { USB_DEVICE(0x157e, 0x300d), .driver_info = DEVICE_ZD1211B },
65 { USB_DEVICE(0x079b, 0x0062), .driver_info = DEVICE_ZD1211B }, 66 { USB_DEVICE(0x079b, 0x0062), .driver_info = DEVICE_ZD1211B },
66 { USB_DEVICE(0x1582, 0x6003), .driver_info = DEVICE_ZD1211B }, 67 { USB_DEVICE(0x1582, 0x6003), .driver_info = DEVICE_ZD1211B },
@@ -82,6 +83,7 @@ static struct usb_device_id usb_ids[] = {
82 { USB_DEVICE(0x0cde, 0x001a), .driver_info = DEVICE_ZD1211B }, 83 { USB_DEVICE(0x0cde, 0x001a), .driver_info = DEVICE_ZD1211B },
83 { USB_DEVICE(0x0586, 0x340a), .driver_info = DEVICE_ZD1211B }, 84 { USB_DEVICE(0x0586, 0x340a), .driver_info = DEVICE_ZD1211B },
84 { USB_DEVICE(0x0471, 0x1237), .driver_info = DEVICE_ZD1211B }, 85 { USB_DEVICE(0x0471, 0x1237), .driver_info = DEVICE_ZD1211B },
86 { USB_DEVICE(0x07fa, 0x1196), .driver_info = DEVICE_ZD1211B },
85 /* "Driverless" devices that need ejecting */ 87 /* "Driverless" devices that need ejecting */
86 { USB_DEVICE(0x0ace, 0x2011), .driver_info = DEVICE_INSTALLER }, 88 { USB_DEVICE(0x0ace, 0x2011), .driver_info = DEVICE_INSTALLER },
87 { USB_DEVICE(0x0ace, 0x20ff), .driver_info = DEVICE_INSTALLER }, 89 { USB_DEVICE(0x0ace, 0x20ff), .driver_info = DEVICE_INSTALLER },
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index c6948d8f53f6..6d017adc914a 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -1785,7 +1785,7 @@ static int __devexit xennet_remove(struct xenbus_device *dev)
1785 return 0; 1785 return 0;
1786} 1786}
1787 1787
1788static struct xenbus_driver netfront = { 1788static struct xenbus_driver netfront_driver = {
1789 .name = "vif", 1789 .name = "vif",
1790 .owner = THIS_MODULE, 1790 .owner = THIS_MODULE,
1791 .ids = netfront_ids, 1791 .ids = netfront_ids,
@@ -1805,7 +1805,7 @@ static int __init netif_init(void)
1805 1805
1806 printk(KERN_INFO "Initialising Xen virtual ethernet driver.\n"); 1806 printk(KERN_INFO "Initialising Xen virtual ethernet driver.\n");
1807 1807
1808 return xenbus_register_frontend(&netfront); 1808 return xenbus_register_frontend(&netfront_driver);
1809} 1809}
1810module_init(netif_init); 1810module_init(netif_init);
1811 1811
@@ -1815,7 +1815,7 @@ static void __exit netif_exit(void)
1815 if (xen_initial_domain()) 1815 if (xen_initial_domain())
1816 return; 1816 return;
1817 1817
1818 xenbus_unregister_driver(&netfront); 1818 xenbus_unregister_driver(&netfront_driver);
1819} 1819}
1820module_exit(netif_exit); 1820module_exit(netif_exit);
1821 1821
diff --git a/drivers/net/xtsonic.c b/drivers/net/xtsonic.c
new file mode 100644
index 000000000000..da42aa06a3ba
--- /dev/null
+++ b/drivers/net/xtsonic.c
@@ -0,0 +1,319 @@
1/*
2 * xtsonic.c
3 *
4 * (C) 2001 - 2007 Tensilica Inc.
5 * Kevin Chea <kchea@yahoo.com>
6 * Marc Gauthier <marc@linux-xtensa.org>
7 * Chris Zankel <chris@zankel.net>
8 *
9 * (C) 1996,1998 by Thomas Bogendoerfer (tsbogend@alpha.franken.de)
10 *
11 * This driver is based on work from Andreas Busse, but most of
12 * the code is rewritten.
13 *
14 * (C) 1995 by Andreas Busse (andy@waldorf-gmbh.de)
15 *
16 * A driver for the onboard Sonic ethernet controller on the XT2000.
17 */
18
19#include <linux/kernel.h>
20#include <linux/module.h>
21#include <linux/types.h>
22#include <linux/fcntl.h>
23#include <linux/interrupt.h>
24#include <linux/init.h>
25#include <linux/ioport.h>
26#include <linux/in.h>
27#include <linux/slab.h>
28#include <linux/string.h>
29#include <linux/delay.h>
30#include <linux/errno.h>
31#include <linux/netdevice.h>
32#include <linux/etherdevice.h>
33#include <linux/skbuff.h>
34#include <linux/platform_device.h>
35#include <linux/dma-mapping.h>
36
37#include <asm/io.h>
38#include <asm/pgtable.h>
39#include <asm/dma.h>
40
/* Driver name; also used as the owner string for the I/O mem region. */
static char xtsonic_string[] = "xtsonic";

/* Provided by the XT2000 board support code (no header available). */
extern unsigned xtboard_nvram_valid(void);
extern void xtboard_get_ether_addr(unsigned char *buf);

#include "sonic.h"

/*
 * According to the documentation for the Sonic ethernet controller,
 * EOBC should be 760 words (1520 bytes) for 32-bit applications, and,
 * as such, 2 words less than the buffer size. The value for RBSIZE
 * defined in sonic.h, however is only 1520.
 *
 * (Note that in 16-bit configurations, EOBC is 759 words (1518 bytes) and
 * RBSIZE 1520 bytes)
 */
#undef SONIC_RBSIZE
#define SONIC_RBSIZE	1524

/*
 * The chip provides 256 byte register space.
 */
#define SONIC_MEM_SIZE	0x100

/*
 * Macros to access SONIC registers.  They expect a local
 * 'struct net_device *dev' in the caller's scope and assume a 32-bit
 * register stride (pointer arithmetic on 'volatile unsigned int *');
 * only the low 16 bits of each register are significant on read.
 */
#define SONIC_READ(reg) \
	(0xffff & *((volatile unsigned int *)dev->base_addr+reg))

#define SONIC_WRITE(reg,val) \
	*((volatile unsigned int *)dev->base_addr+reg) = val


/* Use 0 for production, 1 for verification, and >2 for debug */
#ifdef SONIC_DEBUG
static unsigned int sonic_debug = SONIC_DEBUG;
#else
static unsigned int sonic_debug = 1;
#endif

/*
 * We cannot use station (ethernet) address prefixes to detect the
 * sonic controller since these are board manufacturer dependent.
 * So we check for known Silicon Revision IDs instead.
 */
static unsigned short known_revisions[] =
{
	0x101,			/* SONIC 83934 */
	0xffff			/* end of list */
};
92
93static int xtsonic_open(struct net_device *dev)
94{
95 if (request_irq(dev->irq,&sonic_interrupt,IRQF_DISABLED,"sonic",dev)) {
96 printk(KERN_ERR "%s: unable to get IRQ %d.\n",
97 dev->name, dev->irq);
98 return -EAGAIN;
99 }
100 return sonic_open(dev);
101}
102
103static int xtsonic_close(struct net_device *dev)
104{
105 int err;
106 err = sonic_close(dev);
107 free_irq(dev->irq, dev);
108 return err;
109}
110
111static int __init sonic_probe1(struct net_device *dev)
112{
113 static unsigned version_printed = 0;
114 unsigned int silicon_revision;
115 struct sonic_local *lp = netdev_priv(dev);
116 unsigned int base_addr = dev->base_addr;
117 int i;
118 int err = 0;
119
120 if (!request_mem_region(base_addr, 0x100, xtsonic_string))
121 return -EBUSY;
122
123 /*
124 * get the Silicon Revision ID. If this is one of the known
125 * one assume that we found a SONIC ethernet controller at
126 * the expected location.
127 */
128 silicon_revision = SONIC_READ(SONIC_SR);
129 if (sonic_debug > 1)
130 printk("SONIC Silicon Revision = 0x%04x\n",silicon_revision);
131
132 i = 0;
133 while ((known_revisions[i] != 0xffff) &&
134 (known_revisions[i] != silicon_revision))
135 i++;
136
137 if (known_revisions[i] == 0xffff) {
138 printk("SONIC ethernet controller not found (0x%4x)\n",
139 silicon_revision);
140 return -ENODEV;
141 }
142
143 if (sonic_debug && version_printed++ == 0)
144 printk(version);
145
146 /*
147 * Put the sonic into software reset, then retrieve ethernet address.
148 * Note: we are assuming that the boot-loader has initialized the cam.
149 */
150 SONIC_WRITE(SONIC_CMD,SONIC_CR_RST);
151 SONIC_WRITE(SONIC_DCR,
152 SONIC_DCR_WC0|SONIC_DCR_DW|SONIC_DCR_LBR|SONIC_DCR_SBUS);
153 SONIC_WRITE(SONIC_CEP,0);
154 SONIC_WRITE(SONIC_IMR,0);
155
156 SONIC_WRITE(SONIC_CMD,SONIC_CR_RST);
157 SONIC_WRITE(SONIC_CEP,0);
158
159 for (i=0; i<3; i++) {
160 unsigned int val = SONIC_READ(SONIC_CAP0-i);
161 dev->dev_addr[i*2] = val;
162 dev->dev_addr[i*2+1] = val >> 8;
163 }
164
165 /* Initialize the device structure. */
166
167 lp->dma_bitmode = SONIC_BITMODE32;
168
169 /*
170 * Allocate local private descriptor areas in uncached space.
171 * The entire structure must be located within the same 64kb segment.
172 * A simple way to ensure this is to allocate twice the
173 * size of the structure -- given that the structure is
174 * much less than 64 kB, at least one of the halves of
175 * the allocated area will be contained entirely in 64 kB.
176 * We also allocate extra space for a pointer to allow freeing
177 * this structure later on (in xtsonic_cleanup_module()).
178 */
179 lp->descriptors =
180 dma_alloc_coherent(lp->device,
181 SIZEOF_SONIC_DESC * SONIC_BUS_SCALE(lp->dma_bitmode),
182 &lp->descriptors_laddr, GFP_KERNEL);
183
184 if (lp->descriptors == NULL) {
185 printk(KERN_ERR "%s: couldn't alloc DMA memory for "
186 " descriptors.\n", lp->device->bus_id);
187 goto out;
188 }
189
190 lp->cda = lp->descriptors;
191 lp->tda = lp->cda + (SIZEOF_SONIC_CDA
192 * SONIC_BUS_SCALE(lp->dma_bitmode));
193 lp->rda = lp->tda + (SIZEOF_SONIC_TD * SONIC_NUM_TDS
194 * SONIC_BUS_SCALE(lp->dma_bitmode));
195 lp->rra = lp->rda + (SIZEOF_SONIC_RD * SONIC_NUM_RDS
196 * SONIC_BUS_SCALE(lp->dma_bitmode));
197
198 /* get the virtual dma address */
199
200 lp->cda_laddr = lp->descriptors_laddr;
201 lp->tda_laddr = lp->cda_laddr + (SIZEOF_SONIC_CDA
202 * SONIC_BUS_SCALE(lp->dma_bitmode));
203 lp->rda_laddr = lp->tda_laddr + (SIZEOF_SONIC_TD * SONIC_NUM_TDS
204 * SONIC_BUS_SCALE(lp->dma_bitmode));
205 lp->rra_laddr = lp->rda_laddr + (SIZEOF_SONIC_RD * SONIC_NUM_RDS
206 * SONIC_BUS_SCALE(lp->dma_bitmode));
207
208 dev->open = xtsonic_open;
209 dev->stop = xtsonic_close;
210 dev->hard_start_xmit = sonic_send_packet;
211 dev->get_stats = sonic_get_stats;
212 dev->set_multicast_list = &sonic_multicast_list;
213 dev->tx_timeout = sonic_tx_timeout;
214 dev->watchdog_timeo = TX_TIMEOUT;
215
216 /*
217 * clear tally counter
218 */
219 SONIC_WRITE(SONIC_CRCT,0xffff);
220 SONIC_WRITE(SONIC_FAET,0xffff);
221 SONIC_WRITE(SONIC_MPT,0xffff);
222
223 return 0;
224out:
225 release_region(dev->base_addr, SONIC_MEM_SIZE);
226 return err;
227}
228
229
230/*
231 * Probe for a SONIC ethernet controller on an XT2000 board.
232 * Actually probing is superfluous but we're paranoid.
233 */
234
235int __init xtsonic_probe(struct platform_device *pdev)
236{
237 struct net_device *dev;
238 struct sonic_local *lp;
239 struct resource *resmem, *resirq;
240 int err = 0;
241
242 DECLARE_MAC_BUF(mac);
243
244 if ((resmem = platform_get_resource(pdev, IORESOURCE_MEM, 0)) == NULL)
245 return -ENODEV;
246
247 if ((resirq = platform_get_resource(pdev, IORESOURCE_IRQ, 0)) == NULL)
248 return -ENODEV;
249
250 if ((dev = alloc_etherdev(sizeof(struct sonic_local))) == NULL)
251 return -ENOMEM;
252
253 lp = netdev_priv(dev);
254 lp->device = &pdev->dev;
255 SET_NETDEV_DEV(dev, &pdev->dev);
256 netdev_boot_setup_check(dev);
257
258 dev->base_addr = resmem->start;
259 dev->irq = resirq->start;
260
261 if ((err = sonic_probe1(dev)))
262 goto out;
263 if ((err = register_netdev(dev)))
264 goto out1;
265
266 printk("%s: SONIC ethernet @%08lx, MAC %s, IRQ %d\n", dev->name,
267 dev->base_addr, print_mac(mac, dev->dev_addr), dev->irq);
268
269 return 0;
270
271out1:
272 release_region(dev->base_addr, SONIC_MEM_SIZE);
273out:
274 free_netdev(dev);
275
276 return err;
277}
278
279MODULE_DESCRIPTION("Xtensa XT2000 SONIC ethernet driver");
280module_param(sonic_debug, int, 0);
281MODULE_PARM_DESC(sonic_debug, "xtsonic debug level (1-4)");
282
283#include "sonic.c"
284
285static int __devexit xtsonic_device_remove (struct platform_device *pdev)
286{
287 struct net_device *dev = platform_get_drvdata(pdev);
288 struct sonic_local *lp = netdev_priv(dev);
289
290 unregister_netdev(dev);
291 dma_free_coherent(lp->device,
292 SIZEOF_SONIC_DESC * SONIC_BUS_SCALE(lp->dma_bitmode),
293 lp->descriptors, lp->descriptors_laddr);
294 release_region (dev->base_addr, SONIC_MEM_SIZE);
295 free_netdev(dev);
296
297 return 0;
298}
299
/* Binds to platform devices named "xtsonic" registered by board code. */
static struct platform_driver xtsonic_driver = {
	.probe = xtsonic_probe,
	.remove = __devexit_p(xtsonic_device_remove),
	.driver = {
		.name = xtsonic_string,
	},
};
307
/* Module entry point: register the platform driver with the core. */
static int __init xtsonic_init(void)
{
	return platform_driver_register(&xtsonic_driver);
}
312
/* Module exit point: unregister the platform driver. */
static void __exit xtsonic_cleanup(void)
{
	platform_driver_unregister(&xtsonic_driver);
}

module_init(xtsonic_init);
module_exit(xtsonic_cleanup);