author    John W. Linville <linville@tuxdriver.com>  2006-05-05 16:50:23 -0400
committer John W. Linville <linville@tuxdriver.com>  2006-05-05 16:50:23 -0400
commit    aad61439e6a00bdb72cb649e11f6e166590c5f66 (patch)
tree      2279f3c2a15f81526d14182c6acb358cafd0b359 /drivers/net
parent    3c304956755fa63ee80ca51ce38078fe1c4e8818 (diff)
parent    d98550e334715b2d9e45f8f0f4e1608720108640 (diff)
Merge branch 'from-linus' into upstream
Diffstat (limited to 'drivers/net')
-rw-r--r--  drivers/net/e1000/e1000_main.c  |   1
-rw-r--r--  drivers/net/forcedeth.c         | 389
-rw-r--r--  drivers/net/gianfar.c           |  56
-rw-r--r--  drivers/net/gianfar.h           |  67
-rw-r--r--  drivers/net/gianfar_ethtool.c   |  20
-rw-r--r--  drivers/net/gianfar_sysfs.c     |  24
-rw-r--r--  drivers/net/hamradio/dmascc.c   |   1
-rw-r--r--  drivers/net/hamradio/scc.c      |   1
-rw-r--r--  drivers/net/hamradio/yam.c      |   1
-rw-r--r--  drivers/net/mv643xx_eth.c       |   2
-rw-r--r--  drivers/net/sky2.c              |  52
-rw-r--r--  drivers/net/sky2.h              |   2
-rw-r--r--  drivers/net/tg3.c               |  82
-rw-r--r--  drivers/net/tg3.h               |   1
-rw-r--r--  drivers/net/via-rhine.c         |   6
15 files changed, 503 insertions(+), 202 deletions(-)
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
index add8dc4aa7b0..c99e87838f92 100644
--- a/drivers/net/e1000/e1000_main.c
+++ b/drivers/net/e1000/e1000_main.c
@@ -3768,6 +3768,7 @@ e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
 			ps_page->ps_page[j] = NULL;
 			skb->len += length;
 			skb->data_len += length;
+			skb->truesize += length;
 		}
 
 copydone:
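
Note on the e1000 hunk above: when receive data is attached to an skb as page fragments, skb->len and skb->data_len grow, but skb->truesize is the figure the stack charges against socket receive buffers, so leaving it stale understates memory consumption. A minimal sketch of the pattern (the helper name is hypothetical, not e1000 code):

	/* Attach a page fragment to an skb and keep all three
	 * accounting fields consistent (illustrative sketch). */
	static void rx_attach_frag(struct sk_buff *skb, struct page *page,
				   unsigned int offset, unsigned int length)
	{
		skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
				   page, offset, length);
		skb->len      += length;	/* total packet length */
		skb->data_len += length;	/* bytes held in fragments */
		skb->truesize += length;	/* what sockets are charged */
	}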
diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c
index 7627a75f4f7c..f7235c9bc421 100644
--- a/drivers/net/forcedeth.c
+++ b/drivers/net/forcedeth.c
@@ -105,6 +105,8 @@
  * 0.50: 20 Jan 2006: Add 8021pq tagging support.
  * 0.51: 20 Jan 2006: Add 64bit consistent memory allocation for rings.
  * 0.52: 20 Jan 2006: Add MSI/MSIX support.
+ * 0.53: 19 Mar 2006: Fix init from low power mode and add hw reset.
+ * 0.54: 21 Mar 2006: Fix spin locks for multi irqs and cleanup.
  *
  * Known bugs:
  * We suspect that on some hardware no TX done interrupts are generated.
@@ -116,7 +118,7 @@
  * DEV_NEED_TIMERIRQ will not harm you on sane hardware, only generating a few
  * superfluous timer interrupts from the nic.
  */
-#define FORCEDETH_VERSION		"0.52"
+#define FORCEDETH_VERSION		"0.54"
 #define DRV_NAME			"forcedeth"
 
 #include <linux/module.h>
@@ -160,6 +162,7 @@
 #define DEV_HAS_VLAN		0x0020	/* device supports vlan tagging and striping */
 #define DEV_HAS_MSI		0x0040	/* device supports MSI */
 #define DEV_HAS_MSI_X		0x0080	/* device supports MSI-X */
+#define DEV_HAS_POWER_CNTRL	0x0100	/* device supports power savings */
 
 enum {
 	NvRegIrqStatus = 0x000,
@@ -203,6 +206,8 @@ enum {
 #define NVREG_MISC1_HD		0x02
 #define NVREG_MISC1_FORCE	0x3b0f3c
 
+	NvRegMacReset = 0x3c,
+#define NVREG_MAC_RESET_ASSERT	0x0F3
 	NvRegTransmitterControl = 0x084,
 #define NVREG_XMITCTL_START	0x01
 	NvRegTransmitterStatus = 0x088,
@@ -326,6 +331,10 @@ enum {
 	NvRegMSIXMap0 = 0x3e0,
 	NvRegMSIXMap1 = 0x3e4,
 	NvRegMSIXIrqStatus = 0x3f0,
+
+	NvRegPowerState2 = 0x600,
+#define NVREG_POWERSTATE2_POWERUP_MASK		0x0F11
+#define NVREG_POWERSTATE2_POWERUP_REV_A3	0x0001
 };
 
 /* Big endian: should work, but is untested */
@@ -414,7 +423,8 @@ typedef union _ring_type {
 #define NV_RX3_VLAN_TAG_MASK	(0x0000FFFF)
 
 /* Miscelaneous hardware related defines: */
-#define NV_PCI_REGSZ		0x270
+#define NV_PCI_REGSZ_VER1	0x270
+#define NV_PCI_REGSZ_VER2	0x604
 
 /* various timeout delays: all in usec */
 #define NV_TXRX_RESET_DELAY	4
@@ -431,6 +441,7 @@ typedef union _ring_type {
 #define NV_MIIBUSY_DELAY	50
 #define NV_MIIPHY_DELAY	10
 #define NV_MIIPHY_DELAYMAX	10000
+#define NV_MAC_RESET_DELAY	64
 
 #define NV_WAKEUPPATTERNS	5
 #define NV_WAKEUPMASKENTRIES	4
@@ -552,6 +563,8 @@ struct fe_priv {
 	u32 desc_ver;
 	u32 txrxctl_bits;
 	u32 vlanctl_bits;
+	u32 driver_data;
+	u32 register_size;
 
 	void __iomem *base;
 
@@ -698,6 +711,72 @@ static void setup_hw_rings(struct net_device *dev, int rxtx_flags)
 	}
 }
 
+static int using_multi_irqs(struct net_device *dev)
+{
+	struct fe_priv *np = get_nvpriv(dev);
+
+	if (!(np->msi_flags & NV_MSI_X_ENABLED) ||
+	    ((np->msi_flags & NV_MSI_X_ENABLED) &&
+	     ((np->msi_flags & NV_MSI_X_VECTORS_MASK) == 0x1)))
+		return 0;
+	else
+		return 1;
+}
+
+static void nv_enable_irq(struct net_device *dev)
+{
+	struct fe_priv *np = get_nvpriv(dev);
+
+	if (!using_multi_irqs(dev)) {
+		if (np->msi_flags & NV_MSI_X_ENABLED)
+			enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
+		else
+			enable_irq(dev->irq);
+	} else {
+		enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
+		enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
+		enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
+	}
+}
+
+static void nv_disable_irq(struct net_device *dev)
+{
+	struct fe_priv *np = get_nvpriv(dev);
+
+	if (!using_multi_irqs(dev)) {
+		if (np->msi_flags & NV_MSI_X_ENABLED)
+			disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
+		else
+			disable_irq(dev->irq);
+	} else {
+		disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
+		disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
+		disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
+	}
+}
+
+/* In MSIX mode, a write to irqmask behaves as XOR */
+static void nv_enable_hw_interrupts(struct net_device *dev, u32 mask)
+{
+	u8 __iomem *base = get_hwbase(dev);
+
+	writel(mask, base + NvRegIrqMask);
+}
+
+static void nv_disable_hw_interrupts(struct net_device *dev, u32 mask)
+{
+	struct fe_priv *np = get_nvpriv(dev);
+	u8 __iomem *base = get_hwbase(dev);
+
+	if (np->msi_flags & NV_MSI_X_ENABLED) {
+		writel(mask, base + NvRegIrqMask);
+	} else {
+		if (np->msi_flags & NV_MSI_ENABLED)
+			writel(0, base + NvRegMSIIrqMask);
+		writel(0, base + NvRegIrqMask);
+	}
+}
+
 #define MII_READ	(-1)
 /* mii_rw: read/write a register on the PHY.
  *
@@ -919,6 +998,24 @@ static void nv_txrx_reset(struct net_device *dev)
 	pci_push(base);
 }
 
+static void nv_mac_reset(struct net_device *dev)
+{
+	struct fe_priv *np = netdev_priv(dev);
+	u8 __iomem *base = get_hwbase(dev);
+
+	dprintk(KERN_DEBUG "%s: nv_mac_reset\n", dev->name);
+	writel(NVREG_TXRXCTL_BIT2 | NVREG_TXRXCTL_RESET | np->txrxctl_bits, base + NvRegTxRxControl);
+	pci_push(base);
+	writel(NVREG_MAC_RESET_ASSERT, base + NvRegMacReset);
+	pci_push(base);
+	udelay(NV_MAC_RESET_DELAY);
+	writel(0, base + NvRegMacReset);
+	pci_push(base);
+	udelay(NV_MAC_RESET_DELAY);
+	writel(NVREG_TXRXCTL_BIT2 | np->txrxctl_bits, base + NvRegTxRxControl);
+	pci_push(base);
+}
+
 /*
  * nv_get_stats: dev->get_stats function
  * Get latest stats value from the nic.
@@ -989,24 +1086,25 @@ static void nv_do_rx_refill(unsigned long data)
 	struct net_device *dev = (struct net_device *) data;
 	struct fe_priv *np = netdev_priv(dev);
 
-
-	if (!(np->msi_flags & NV_MSI_X_ENABLED) ||
-	    ((np->msi_flags & NV_MSI_X_ENABLED) &&
-	     ((np->msi_flags & NV_MSI_X_VECTORS_MASK) == 0x1))) {
-		disable_irq(dev->irq);
+	if (!using_multi_irqs(dev)) {
+		if (np->msi_flags & NV_MSI_X_ENABLED)
+			disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
+		else
+			disable_irq(dev->irq);
 	} else {
 		disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
 	}
 	if (nv_alloc_rx(dev)) {
-		spin_lock(&np->lock);
+		spin_lock_irq(&np->lock);
 		if (!np->in_shutdown)
 			mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
-		spin_unlock(&np->lock);
+		spin_unlock_irq(&np->lock);
 	}
-	if (!(np->msi_flags & NV_MSI_X_ENABLED) ||
-	    ((np->msi_flags & NV_MSI_X_ENABLED) &&
-	     ((np->msi_flags & NV_MSI_X_VECTORS_MASK) == 0x1))) {
-		enable_irq(dev->irq);
+	if (!using_multi_irqs(dev)) {
+		if (np->msi_flags & NV_MSI_X_ENABLED)
+			enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
+		else
+			enable_irq(dev->irq);
 	} else {
 		enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
 	}
@@ -1331,7 +1429,7 @@ static void nv_tx_timeout(struct net_device *dev)
 			dev->name, (unsigned long)np->ring_addr,
 			np->next_tx, np->nic_tx);
 	printk(KERN_INFO "%s: Dumping tx registers\n", dev->name);
-	for (i=0;i<0x400;i+= 32) {
+	for (i=0;i<=np->register_size;i+= 32) {
 		printk(KERN_INFO "%3x: %08x %08x %08x %08x %08x %08x %08x %08x\n",
 				i,
 				readl(base + i + 0), readl(base + i + 4),
@@ -1638,15 +1736,7 @@ static int nv_change_mtu(struct net_device *dev, int new_mtu)
 	 * guessed, there is probably a simpler approach.
 	 * Changing the MTU is a rare event, it shouldn't matter.
 	 */
-	if (!(np->msi_flags & NV_MSI_X_ENABLED) ||
-	    ((np->msi_flags & NV_MSI_X_ENABLED) &&
-	     ((np->msi_flags & NV_MSI_X_VECTORS_MASK) == 0x1))) {
-		disable_irq(dev->irq);
-	} else {
-		disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
-		disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
-		disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
-	}
+	nv_disable_irq(dev);
 	spin_lock_bh(&dev->xmit_lock);
 	spin_lock(&np->lock);
 	/* stop engines */
@@ -1679,15 +1769,7 @@ static int nv_change_mtu(struct net_device *dev, int new_mtu)
 		nv_start_tx(dev);
 		spin_unlock(&np->lock);
 		spin_unlock_bh(&dev->xmit_lock);
-		if (!(np->msi_flags & NV_MSI_X_ENABLED) ||
-		    ((np->msi_flags & NV_MSI_X_ENABLED) &&
-		     ((np->msi_flags & NV_MSI_X_VECTORS_MASK) == 0x1))) {
-			enable_irq(dev->irq);
-		} else {
-			enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
-			enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
-			enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
-		}
+		nv_enable_irq(dev);
 	}
 	return 0;
 }
@@ -2078,16 +2160,16 @@ static irqreturn_t nv_nic_irq_tx(int foo, void *data, struct pt_regs *regs)
 		if (!(events & np->irqmask))
 			break;
 
-		spin_lock(&np->lock);
+		spin_lock_irq(&np->lock);
 		nv_tx_done(dev);
-		spin_unlock(&np->lock);
+		spin_unlock_irq(&np->lock);
 
 		if (events & (NVREG_IRQ_TX_ERR)) {
 			dprintk(KERN_DEBUG "%s: received irq with events 0x%x. Probably TX fail.\n",
 						dev->name, events);
 		}
 		if (i > max_interrupt_work) {
-			spin_lock(&np->lock);
+			spin_lock_irq(&np->lock);
 			/* disable interrupts on the nic */
 			writel(NVREG_IRQ_TX_ALL, base + NvRegIrqMask);
 			pci_push(base);
@@ -2097,7 +2179,7 @@ static irqreturn_t nv_nic_irq_tx(int foo, void *data, struct pt_regs *regs)
 				mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
 			}
 			printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_tx.\n", dev->name, i);
-			spin_unlock(&np->lock);
+			spin_unlock_irq(&np->lock);
 			break;
 		}
 
@@ -2127,14 +2209,14 @@ static irqreturn_t nv_nic_irq_rx(int foo, void *data, struct pt_regs *regs)
 
 		nv_rx_process(dev);
 		if (nv_alloc_rx(dev)) {
-			spin_lock(&np->lock);
+			spin_lock_irq(&np->lock);
 			if (!np->in_shutdown)
 				mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
-			spin_unlock(&np->lock);
+			spin_unlock_irq(&np->lock);
 		}
 
 		if (i > max_interrupt_work) {
-			spin_lock(&np->lock);
+			spin_lock_irq(&np->lock);
 			/* disable interrupts on the nic */
 			writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask);
 			pci_push(base);
@@ -2144,7 +2226,7 @@ static irqreturn_t nv_nic_irq_rx(int foo, void *data, struct pt_regs *regs)
 				mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
 			}
 			printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_rx.\n", dev->name, i);
-			spin_unlock(&np->lock);
+			spin_unlock_irq(&np->lock);
 			break;
 		}
 
@@ -2173,14 +2255,14 @@ static irqreturn_t nv_nic_irq_other(int foo, void *data, struct pt_regs *regs)
 			break;
 
 		if (events & NVREG_IRQ_LINK) {
-			spin_lock(&np->lock);
+			spin_lock_irq(&np->lock);
 			nv_link_irq(dev);
-			spin_unlock(&np->lock);
+			spin_unlock_irq(&np->lock);
 		}
 		if (np->need_linktimer && time_after(jiffies, np->link_timeout)) {
-			spin_lock(&np->lock);
+			spin_lock_irq(&np->lock);
 			nv_linkchange(dev);
-			spin_unlock(&np->lock);
+			spin_unlock_irq(&np->lock);
 			np->link_timeout = jiffies + LINK_TIMEOUT;
 		}
 		if (events & (NVREG_IRQ_UNKNOWN)) {
@@ -2188,7 +2270,7 @@ static irqreturn_t nv_nic_irq_other(int foo, void *data, struct pt_regs *regs)
 					dev->name, events);
 		}
 		if (i > max_interrupt_work) {
-			spin_lock(&np->lock);
+			spin_lock_irq(&np->lock);
 			/* disable interrupts on the nic */
 			writel(NVREG_IRQ_OTHER, base + NvRegIrqMask);
 			pci_push(base);
@@ -2198,7 +2280,7 @@ static irqreturn_t nv_nic_irq_other(int foo, void *data, struct pt_regs *regs)
 				mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
 			}
 			printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_other.\n", dev->name, i);
-			spin_unlock(&np->lock);
+			spin_unlock_irq(&np->lock);
 			break;
 		}
 
@@ -2221,10 +2303,11 @@ static void nv_do_nic_poll(unsigned long data)
 	 * nv_nic_irq because that may decide to do otherwise
 	 */
 
-	if (!(np->msi_flags & NV_MSI_X_ENABLED) ||
-	    ((np->msi_flags & NV_MSI_X_ENABLED) &&
-	     ((np->msi_flags & NV_MSI_X_VECTORS_MASK) == 0x1))) {
-		disable_irq(dev->irq);
+	if (!using_multi_irqs(dev)) {
+		if (np->msi_flags & NV_MSI_X_ENABLED)
+			disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
+		else
+			disable_irq(dev->irq);
 		mask = np->irqmask;
 	} else {
 		if (np->nic_poll_irq & NVREG_IRQ_RX_ALL) {
@@ -2247,11 +2330,12 @@ static void nv_do_nic_poll(unsigned long data)
 	writel(mask, base + NvRegIrqMask);
 	pci_push(base);
 
-	if (!(np->msi_flags & NV_MSI_X_ENABLED) ||
-	    ((np->msi_flags & NV_MSI_X_ENABLED) &&
-	     ((np->msi_flags & NV_MSI_X_VECTORS_MASK) == 0x1))) {
+	if (!using_multi_irqs(dev)) {
 		nv_nic_irq((int) 0, (void *) data, (struct pt_regs *) NULL);
-		enable_irq(dev->irq);
+		if (np->msi_flags & NV_MSI_X_ENABLED)
+			enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
+		else
+			enable_irq(dev->irq);
 	} else {
 		if (np->nic_poll_irq & NVREG_IRQ_RX_ALL) {
 			nv_nic_irq_rx((int) 0, (void *) data, (struct pt_regs *) NULL);
@@ -2488,11 +2572,11 @@ static int nv_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
 }
 
 #define FORCEDETH_REGS_VER	1
-#define FORCEDETH_REGS_SIZE	0x400 /* 256 32-bit registers */
 
 static int nv_get_regs_len(struct net_device *dev)
 {
-	return FORCEDETH_REGS_SIZE;
+	struct fe_priv *np = netdev_priv(dev);
+	return np->register_size;
 }
 
 static void nv_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *buf)
@@ -2504,7 +2588,7 @@ static void nv_get_regs(struct net_device *dev, struct ethtool_regs *regs, void
 
 	regs->version = FORCEDETH_REGS_VER;
 	spin_lock_irq(&np->lock);
-	for (i=0;i<FORCEDETH_REGS_SIZE/sizeof(u32);i++)
+	for (i = 0;i <= np->register_size/sizeof(u32); i++)
 		rbuf[i] = readl(base + i*sizeof(u32));
 	spin_unlock_irq(&np->lock);
 }
@@ -2598,6 +2682,113 @@ static void set_msix_vector_map(struct net_device *dev, u32 vector, u32 irqmask)
 	writel(readl(base + NvRegMSIXMap1) | msixmap, base + NvRegMSIXMap1);
 }
 
+static int nv_request_irq(struct net_device *dev)
+{
+	struct fe_priv *np = get_nvpriv(dev);
+	u8 __iomem *base = get_hwbase(dev);
+	int ret = 1;
+	int i;
+
+	if (np->msi_flags & NV_MSI_X_CAPABLE) {
+		for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++) {
+			np->msi_x_entry[i].entry = i;
+		}
+		if ((ret = pci_enable_msix(np->pci_dev, np->msi_x_entry, (np->msi_flags & NV_MSI_X_VECTORS_MASK))) == 0) {
+			np->msi_flags |= NV_MSI_X_ENABLED;
+			if (optimization_mode == NV_OPTIMIZATION_MODE_THROUGHPUT) {
+				/* Request irq for rx handling */
+				if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector, &nv_nic_irq_rx, SA_SHIRQ, dev->name, dev) != 0) {
+					printk(KERN_INFO "forcedeth: request_irq failed for rx %d\n", ret);
+					pci_disable_msix(np->pci_dev);
+					np->msi_flags &= ~NV_MSI_X_ENABLED;
+					goto out_err;
+				}
+				/* Request irq for tx handling */
+				if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector, &nv_nic_irq_tx, SA_SHIRQ, dev->name, dev) != 0) {
+					printk(KERN_INFO "forcedeth: request_irq failed for tx %d\n", ret);
+					pci_disable_msix(np->pci_dev);
+					np->msi_flags &= ~NV_MSI_X_ENABLED;
+					goto out_free_rx;
+				}
+				/* Request irq for link and timer handling */
+				if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector, &nv_nic_irq_other, SA_SHIRQ, dev->name, dev) != 0) {
+					printk(KERN_INFO "forcedeth: request_irq failed for link %d\n", ret);
+					pci_disable_msix(np->pci_dev);
+					np->msi_flags &= ~NV_MSI_X_ENABLED;
+					goto out_free_tx;
+				}
+				/* map interrupts to their respective vector */
+				writel(0, base + NvRegMSIXMap0);
+				writel(0, base + NvRegMSIXMap1);
+				set_msix_vector_map(dev, NV_MSI_X_VECTOR_RX, NVREG_IRQ_RX_ALL);
+				set_msix_vector_map(dev, NV_MSI_X_VECTOR_TX, NVREG_IRQ_TX_ALL);
+				set_msix_vector_map(dev, NV_MSI_X_VECTOR_OTHER, NVREG_IRQ_OTHER);
+			} else {
+				/* Request irq for all interrupts */
+				if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector, &nv_nic_irq, SA_SHIRQ, dev->name, dev) != 0) {
+					printk(KERN_INFO "forcedeth: request_irq failed %d\n", ret);
+					pci_disable_msix(np->pci_dev);
+					np->msi_flags &= ~NV_MSI_X_ENABLED;
+					goto out_err;
+				}
+
+				/* map interrupts to vector 0 */
+				writel(0, base + NvRegMSIXMap0);
+				writel(0, base + NvRegMSIXMap1);
+			}
+		}
+	}
+	if (ret != 0 && np->msi_flags & NV_MSI_CAPABLE) {
+		if ((ret = pci_enable_msi(np->pci_dev)) == 0) {
+			np->msi_flags |= NV_MSI_ENABLED;
+			if (request_irq(np->pci_dev->irq, &nv_nic_irq, SA_SHIRQ, dev->name, dev) != 0) {
+				printk(KERN_INFO "forcedeth: request_irq failed %d\n", ret);
+				pci_disable_msi(np->pci_dev);
+				np->msi_flags &= ~NV_MSI_ENABLED;
+				goto out_err;
+			}
+
+			/* map interrupts to vector 0 */
+			writel(0, base + NvRegMSIMap0);
+			writel(0, base + NvRegMSIMap1);
+			/* enable msi vector 0 */
+			writel(NVREG_MSI_VECTOR_0_ENABLED, base + NvRegMSIIrqMask);
+		}
+	}
+	if (ret != 0) {
+		if (request_irq(np->pci_dev->irq, &nv_nic_irq, SA_SHIRQ, dev->name, dev) != 0)
+			goto out_err;
+	}
+
+	return 0;
+out_free_tx:
+	free_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector, dev);
+out_free_rx:
+	free_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector, dev);
+out_err:
+	return 1;
+}
+
+static void nv_free_irq(struct net_device *dev)
+{
+	struct fe_priv *np = get_nvpriv(dev);
+	int i;
+
+	if (np->msi_flags & NV_MSI_X_ENABLED) {
+		for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++) {
+			free_irq(np->msi_x_entry[i].vector, dev);
+		}
+		pci_disable_msix(np->pci_dev);
+		np->msi_flags &= ~NV_MSI_X_ENABLED;
+	} else {
+		free_irq(np->pci_dev->irq, dev);
+		if (np->msi_flags & NV_MSI_ENABLED) {
+			pci_disable_msi(np->pci_dev);
+			np->msi_flags &= ~NV_MSI_ENABLED;
+		}
+	}
+}
+
 static int nv_open(struct net_device *dev)
 {
 	struct fe_priv *np = netdev_priv(dev);
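
Note: nv_request_irq() above is the driver's whole interrupt-setup ladder: MSI-X first (per-cause rx/tx/other vectors steered through NvRegMSIXMap0/1 in throughput mode, a single shared vector otherwise), then MSI, then the legacy INTx line, with each rung unwinding its pci_enable_* state before falling through. A condensed, illustrative view of the control flow (the real function also requests the per-cause handlers and unwinds on partial failure):

	static int request_irq_ladder(struct pci_dev *pdev, struct net_device *dev,
				      struct msix_entry *entries, int nvec,
				      int msix_capable, int msi_capable)
	{
		if (msix_capable && pci_enable_msix(pdev, entries, nvec) == 0)
			return request_irq(entries[0].vector, &nv_nic_irq,
					   SA_SHIRQ, dev->name, dev);	/* MSI-X */
		if (msi_capable && pci_enable_msi(pdev) == 0)
			return request_irq(pdev->irq, &nv_nic_irq,
					   SA_SHIRQ, dev->name, dev);	/* MSI */
		return request_irq(pdev->irq, &nv_nic_irq,
				   SA_SHIRQ, dev->name, dev);		/* INTx */
	}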
@@ -2608,6 +2799,8 @@ static int nv_open(struct net_device *dev)
 	dprintk(KERN_DEBUG "nv_open: begin\n");
 
 	/* 1) erase previous misconfiguration */
+	if (np->driver_data & DEV_HAS_POWER_CNTRL)
+		nv_mac_reset(dev);
 	/* 4.1-1: stop adapter: ignored, 4.3 seems to be overkill */
 	writel(NVREG_MCASTADDRA_FORCE, base + NvRegMulticastAddrA);
 	writel(0, base + NvRegMulticastAddrB);
@@ -2688,12 +2881,16 @@ static int nv_open(struct net_device *dev)
 	udelay(10);
 	writel(readl(base + NvRegPowerState) | NVREG_POWERSTATE_VALID, base + NvRegPowerState);
 
-	writel(0, base + NvRegIrqMask);
+	nv_disable_hw_interrupts(dev, np->irqmask);
 	pci_push(base);
 	writel(NVREG_MIISTAT_MASK2, base + NvRegMIIStatus);
 	writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
 	pci_push(base);
 
+	if (nv_request_irq(dev)) {
+		goto out_drain;
+	}
+
 	if (np->msi_flags & NV_MSI_X_CAPABLE) {
 		for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++) {
 			np->msi_x_entry[i].entry = i;
@@ -2767,7 +2964,7 @@ static int nv_open(struct net_device *dev)
 	}
 
 	/* ask for interrupts */
-	writel(np->irqmask, base + NvRegIrqMask);
+	nv_enable_hw_interrupts(dev, np->irqmask);
 
 	spin_lock_irq(&np->lock);
 	writel(NVREG_MCASTADDRA_FORCE, base + NvRegMulticastAddrA);
@@ -2811,7 +3008,6 @@ static int nv_close(struct net_device *dev)
 {
 	struct fe_priv *np = netdev_priv(dev);
 	u8 __iomem *base;
-	int i;
 
 	spin_lock_irq(&np->lock);
 	np->in_shutdown = 1;
@@ -2829,31 +3025,13 @@ static int nv_close(struct net_device *dev)
 
 	/* disable interrupts on the nic or we will lock up */
 	base = get_hwbase(dev);
-	if (np->msi_flags & NV_MSI_X_ENABLED) {
-		writel(np->irqmask, base + NvRegIrqMask);
-	} else {
-		if (np->msi_flags & NV_MSI_ENABLED)
-			writel(0, base + NvRegMSIIrqMask);
-		writel(0, base + NvRegIrqMask);
-	}
+	nv_disable_hw_interrupts(dev, np->irqmask);
 	pci_push(base);
 	dprintk(KERN_INFO "%s: Irqmask is zero again\n", dev->name);
 
 	spin_unlock_irq(&np->lock);
 
-	if (np->msi_flags & NV_MSI_X_ENABLED) {
-		for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++) {
-			free_irq(np->msi_x_entry[i].vector, dev);
-		}
-		pci_disable_msix(np->pci_dev);
-		np->msi_flags &= ~NV_MSI_X_ENABLED;
-	} else {
-		free_irq(np->pci_dev->irq, dev);
-		if (np->msi_flags & NV_MSI_ENABLED) {
-			pci_disable_msi(np->pci_dev);
-			np->msi_flags &= ~NV_MSI_ENABLED;
-		}
-	}
+	nv_free_irq(dev);
 
 	drain_ring(dev);
 
@@ -2878,6 +3056,7 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
 	unsigned long addr;
 	u8 __iomem *base;
 	int err, i;
+	u32 powerstate;
 
 	dev = alloc_etherdev(sizeof(struct fe_priv));
 	err = -ENOMEM;
@@ -2910,6 +3089,11 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
 	if (err < 0)
 		goto out_disable;
 
+	if (id->driver_data & (DEV_HAS_VLAN|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL))
+		np->register_size = NV_PCI_REGSZ_VER2;
+	else
+		np->register_size = NV_PCI_REGSZ_VER1;
+
 	err = -EINVAL;
 	addr = 0;
 	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
@@ -2918,7 +3102,7 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
 				pci_resource_len(pci_dev, i),
 				pci_resource_flags(pci_dev, i));
 		if (pci_resource_flags(pci_dev, i) & IORESOURCE_MEM &&
-				pci_resource_len(pci_dev, i) >= NV_PCI_REGSZ) {
+				pci_resource_len(pci_dev, i) >= np->register_size) {
 			addr = pci_resource_start(pci_dev, i);
 			break;
 		}
@@ -2929,24 +3113,25 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
 		goto out_relreg;
 	}
 
+	/* copy of driver data */
+	np->driver_data = id->driver_data;
+
 	/* handle different descriptor versions */
 	if (id->driver_data & DEV_HAS_HIGH_DMA) {
 		/* packet format 3: supports 40-bit addressing */
 		np->desc_ver = DESC_VER_3;
+		np->txrxctl_bits = NVREG_TXRXCTL_DESC_3;
 		if (pci_set_dma_mask(pci_dev, DMA_39BIT_MASK)) {
 			printk(KERN_INFO "forcedeth: 64-bit DMA failed, using 32-bit addressing for device %s.\n",
 					pci_name(pci_dev));
 		} else {
-			if (pci_set_consistent_dma_mask(pci_dev, 0x0000007fffffffffULL)) {
-				printk(KERN_INFO "forcedeth: 64-bit DMA (consistent) failed for device %s.\n",
-					pci_name(pci_dev));
-				goto out_relreg;
-			} else {
-				dev->features |= NETIF_F_HIGHDMA;
-				printk(KERN_INFO "forcedeth: using HIGHDMA\n");
-			}
+			dev->features |= NETIF_F_HIGHDMA;
+			printk(KERN_INFO "forcedeth: using HIGHDMA\n");
+		}
+		if (pci_set_consistent_dma_mask(pci_dev, 0x0000007fffffffffULL)) {
+			printk(KERN_INFO "forcedeth: 64-bit DMA (consistent) failed for device %s.\n",
+					pci_name(pci_dev));
 		}
-		np->txrxctl_bits = NVREG_TXRXCTL_DESC_3;
 	} else if (id->driver_data & DEV_HAS_LARGEDESC) {
 		/* packet format 2: supports jumbo frames */
 		np->desc_ver = DESC_VER_2;
@@ -2986,7 +3171,7 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
 	}
 
 	err = -ENOMEM;
-	np->base = ioremap(addr, NV_PCI_REGSZ);
+	np->base = ioremap(addr, np->register_size);
 	if (!np->base)
 		goto out_relreg;
 	dev->base_addr = (unsigned long)np->base;
@@ -3062,6 +3247,20 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
 	writel(0, base + NvRegWakeUpFlags);
 	np->wolenabled = 0;
 
+	if (id->driver_data & DEV_HAS_POWER_CNTRL) {
+		u8 revision_id;
+		pci_read_config_byte(pci_dev, PCI_REVISION_ID, &revision_id);
+
+		/* take phy and nic out of low power mode */
+		powerstate = readl(base + NvRegPowerState2);
+		powerstate &= ~NVREG_POWERSTATE2_POWERUP_MASK;
+		if ((id->device == PCI_DEVICE_ID_NVIDIA_NVENET_12 ||
+		     id->device == PCI_DEVICE_ID_NVIDIA_NVENET_13) &&
+		    revision_id >= 0xA3)
+			powerstate |= NVREG_POWERSTATE2_POWERUP_REV_A3;
+		writel(powerstate, base + NvRegPowerState2);
+	}
+
 	if (np->desc_ver == DESC_VER_1) {
 		np->tx_flags = NV_TX_VALID;
 	} else {
@@ -3223,19 +3422,19 @@ static struct pci_device_id pci_tbl[] = {
 	},
 	{ /* MCP51 Ethernet Controller */
 		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_12),
-		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA,
+		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL,
 	},
 	{ /* MCP51 Ethernet Controller */
 		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_13),
-		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA,
+		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL,
 	},
 	{ /* MCP55 Ethernet Controller */
 		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_14),
-		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X,
+		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL,
 	},
 	{ /* MCP55 Ethernet Controller */
 		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_15),
-		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X,
+		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL,
 	},
 	{0,},
 };
diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c
index 771e25d8c417..218d31764c52 100644
--- a/drivers/net/gianfar.c
+++ b/drivers/net/gianfar.c
@@ -210,7 +210,8 @@ static int gfar_probe(struct platform_device *pdev)
 		goto regs_fail;
 	}
 
-	spin_lock_init(&priv->lock);
+	spin_lock_init(&priv->txlock);
+	spin_lock_init(&priv->rxlock);
 
 	platform_set_drvdata(pdev, dev);
 
@@ -515,11 +516,13 @@ void stop_gfar(struct net_device *dev)
 	phy_stop(priv->phydev);
 
 	/* Lock it down */
-	spin_lock_irqsave(&priv->lock, flags);
+	spin_lock_irqsave(&priv->txlock, flags);
+	spin_lock(&priv->rxlock);
 
 	gfar_halt(dev);
 
-	spin_unlock_irqrestore(&priv->lock, flags);
+	spin_unlock(&priv->rxlock);
+	spin_unlock_irqrestore(&priv->txlock, flags);
 
 	/* Free the IRQs */
 	if (priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
@@ -605,14 +608,15 @@ void gfar_start(struct net_device *dev)
 	tempval |= DMACTRL_INIT_SETTINGS;
 	gfar_write(&priv->regs->dmactrl, tempval);
 
-	/* Clear THLT, so that the DMA starts polling now */
-	gfar_write(&regs->tstat, TSTAT_CLEAR_THALT);
-
 	/* Make sure we aren't stopped */
 	tempval = gfar_read(&priv->regs->dmactrl);
 	tempval &= ~(DMACTRL_GRS | DMACTRL_GTS);
 	gfar_write(&priv->regs->dmactrl, tempval);
 
+	/* Clear THLT/RHLT, so that the DMA starts polling now */
+	gfar_write(&regs->tstat, TSTAT_CLEAR_THALT);
+	gfar_write(&regs->rstat, RSTAT_CLEAR_RHALT);
+
 	/* Unmask the interrupts we look for */
 	gfar_write(&regs->imask, IMASK_DEFAULT);
 }
@@ -928,12 +932,13 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	struct txfcb *fcb = NULL;
 	struct txbd8 *txbdp;
 	u16 status;
+	unsigned long flags;
 
 	/* Update transmit stats */
 	priv->stats.tx_bytes += skb->len;
 
 	/* Lock priv now */
-	spin_lock_irq(&priv->lock);
+	spin_lock_irqsave(&priv->txlock, flags);
 
 	/* Point at the first free tx descriptor */
 	txbdp = priv->cur_tx;
@@ -1004,7 +1009,7 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	gfar_write(&priv->regs->tstat, TSTAT_CLEAR_THALT);
 
 	/* Unlock priv */
-	spin_unlock_irq(&priv->lock);
+	spin_unlock_irqrestore(&priv->txlock, flags);
 
 	return 0;
 }
@@ -1049,7 +1054,7 @@ static void gfar_vlan_rx_register(struct net_device *dev,
 	unsigned long flags;
 	u32 tempval;
 
-	spin_lock_irqsave(&priv->lock, flags);
+	spin_lock_irqsave(&priv->rxlock, flags);
 
 	priv->vlgrp = grp;
 
@@ -1076,7 +1081,7 @@ static void gfar_vlan_rx_register(struct net_device *dev,
 		gfar_write(&priv->regs->rctrl, tempval);
 	}
 
-	spin_unlock_irqrestore(&priv->lock, flags);
+	spin_unlock_irqrestore(&priv->rxlock, flags);
 }
 
 
@@ -1085,12 +1090,12 @@ static void gfar_vlan_rx_kill_vid(struct net_device *dev, uint16_t vid)
 	struct gfar_private *priv = netdev_priv(dev);
 	unsigned long flags;
 
-	spin_lock_irqsave(&priv->lock, flags);
+	spin_lock_irqsave(&priv->rxlock, flags);
 
 	if (priv->vlgrp)
 		priv->vlgrp->vlan_devices[vid] = NULL;
 
-	spin_unlock_irqrestore(&priv->lock, flags);
+	spin_unlock_irqrestore(&priv->rxlock, flags);
 }
 
 
@@ -1179,7 +1184,7 @@ static irqreturn_t gfar_transmit(int irq, void *dev_id, struct pt_regs *regs)
 	gfar_write(&priv->regs->ievent, IEVENT_TX_MASK);
 
 	/* Lock priv */
-	spin_lock(&priv->lock);
+	spin_lock(&priv->txlock);
 	bdp = priv->dirty_tx;
 	while ((bdp->status & TXBD_READY) == 0) {
 		/* If dirty_tx and cur_tx are the same, then either the */
@@ -1224,7 +1229,7 @@ static irqreturn_t gfar_transmit(int irq, void *dev_id, struct pt_regs *regs)
 	else
 		gfar_write(&priv->regs->txic, 0);
 
-	spin_unlock(&priv->lock);
+	spin_unlock(&priv->txlock);
 
 	return IRQ_HANDLED;
 }
@@ -1305,9 +1310,10 @@ irqreturn_t gfar_receive(int irq, void *dev_id, struct pt_regs *regs)
 {
 	struct net_device *dev = (struct net_device *) dev_id;
 	struct gfar_private *priv = netdev_priv(dev);
-
 #ifdef CONFIG_GFAR_NAPI
 	u32 tempval;
+#else
+	unsigned long flags;
 #endif
 
 	/* Clear IEVENT, so rx interrupt isn't called again
@@ -1330,7 +1336,7 @@ irqreturn_t gfar_receive(int irq, void *dev_id, struct pt_regs *regs)
 	}
 #else
 
-	spin_lock(&priv->lock);
+	spin_lock_irqsave(&priv->rxlock, flags);
 	gfar_clean_rx_ring(dev, priv->rx_ring_size);
 
 	/* If we are coalescing interrupts, update the timer */
@@ -1341,7 +1347,7 @@ irqreturn_t gfar_receive(int irq, void *dev_id, struct pt_regs *regs)
 	else
 		gfar_write(&priv->regs->rxic, 0);
 
-	spin_unlock(&priv->lock);
+	spin_unlock_irqrestore(&priv->rxlock, flags);
 #endif
 
 	return IRQ_HANDLED;
@@ -1490,13 +1496,6 @@ int gfar_clean_rx_ring(struct net_device *dev, int rx_work_limit)
 	/* Update the current rxbd pointer to be the next one */
 	priv->cur_rx = bdp;
 
-	/* If no packets have arrived since the
-	 * last one we processed, clear the IEVENT RX and
-	 * BSY bits so that another interrupt won't be
-	 * generated when we set IMASK */
-	if (bdp->status & RXBD_EMPTY)
-		gfar_write(&priv->regs->ievent, IEVENT_RX_MASK);
-
 	return howmany;
 }
 
@@ -1516,7 +1515,7 @@ static int gfar_poll(struct net_device *dev, int *budget)
 	rx_work_limit -= howmany;
 	*budget -= howmany;
 
-	if (rx_work_limit >= 0) {
+	if (rx_work_limit > 0) {
 		netif_rx_complete(dev);
 
 		/* Clear the halt bit in RSTAT */
@@ -1533,7 +1532,8 @@ static int gfar_poll(struct net_device *dev, int *budget)
 		gfar_write(&priv->regs->rxic, 0);
 	}
 
-	return (rx_work_limit < 0) ? 1 : 0;
+	/* Return 1 if there's more work to do */
+	return (rx_work_limit > 0) ? 0 : 1;
 }
 #endif
 
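Note on the gfar_poll changes: under this era's ->poll() API a driver returns 0 only after calling netif_rx_complete(), and 1 to stay on the poll list. The old test treated an exactly exhausted budget (rx_work_limit == 0) as "done", so the driver could take itself off the poll list while frames were still queued; flipping ">= 0" to "> 0" is the entire fix. A sketch of the contract (illustrative; the ring-cleaning helper is hypothetical):

	static int example_poll(struct net_device *dev, int *budget)
	{
		int limit = min(*budget, dev->quota);
		int howmany = example_clean_rx(dev, limit);	/* frames handled */

		dev->quota -= howmany;
		*budget -= howmany;

		if (howmany < limit) {		/* ring drained before the budget: done */
			netif_rx_complete(dev);	/* leave poll list, re-enable rx irqs */
			return 0;
		}
		return 1;			/* budget exhausted: poll again */
	}
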
@@ -1629,7 +1629,7 @@ static void adjust_link(struct net_device *dev)
 	struct phy_device *phydev = priv->phydev;
 	int new_state = 0;
 
-	spin_lock_irqsave(&priv->lock, flags);
+	spin_lock_irqsave(&priv->txlock, flags);
 	if (phydev->link) {
 		u32 tempval = gfar_read(&regs->maccfg2);
 		u32 ecntrl = gfar_read(&regs->ecntrl);
@@ -1694,7 +1694,7 @@ static void adjust_link(struct net_device *dev)
 	if (new_state && netif_msg_link(priv))
 		phy_print_status(phydev);
 
-	spin_unlock_irqrestore(&priv->lock, flags);
+	spin_unlock_irqrestore(&priv->txlock, flags);
 }
 
 /* Update the hash table based on the current list of multicast
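
Note on the gianfar lock split (continued in gianfar.h, gianfar_ethtool.c and gianfar_sysfs.c below): the single priv->lock becomes priv->txlock for the transmit path and priv->rxlock for receive, so TX completion and RX processing no longer serialize each other. Paths that must quiesce both directions always nest the locks in one fixed order, txlock (with irqsave) outside, rxlock inside, released in reverse; a single global order is what rules out AB-BA deadlocks between two such paths. A minimal sketch of the discipline used by stop_gfar() and the ethtool halt paths:

	static void quiesce_both(struct gfar_private *priv)
	{
		unsigned long flags;

		spin_lock_irqsave(&priv->txlock, flags);	/* always outer */
		spin_lock(&priv->rxlock);			/* always inner */

		/* ... halt DMA and drain both rings ... */

		spin_unlock(&priv->rxlock);
		spin_unlock_irqrestore(&priv->txlock, flags);
	}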
diff --git a/drivers/net/gianfar.h b/drivers/net/gianfar.h
index d37d5401be6e..127c98cf3336 100644
--- a/drivers/net/gianfar.h
+++ b/drivers/net/gianfar.h
@@ -656,43 +656,62 @@ struct gfar {
  * the buffer descriptor determines the actual condition.
  */
 struct gfar_private {
-	/* pointers to arrays of skbuffs for tx and rx */
+	/* Fields controlled by TX lock */
+	spinlock_t txlock;
+
+	/* Pointer to the array of skbuffs */
 	struct sk_buff ** tx_skbuff;
-	struct sk_buff ** rx_skbuff;
 
-	/* indices pointing to the next free sbk in skb arrays */
+	/* next free skb in the array */
 	u16 skb_curtx;
-	u16 skb_currx;
 
-	/* index of the first skb which hasn't been transmitted
-	 * yet. */
+	/* First skb in line to be transmitted */
 	u16 skb_dirtytx;
 
 	/* Configuration info for the coalescing features */
 	unsigned char txcoalescing;
 	unsigned short txcount;
 	unsigned short txtime;
+
+	/* Buffer descriptor pointers */
+	struct txbd8 *tx_bd_base;	/* First tx buffer descriptor */
+	struct txbd8 *cur_tx;		/* Next free ring entry */
+	struct txbd8 *dirty_tx;		/* First buffer in line
+					   to be transmitted */
+	unsigned int tx_ring_size;
+
+	/* RX Locked fields */
+	spinlock_t rxlock;
+
+	/* skb array and index */
+	struct sk_buff ** rx_skbuff;
+	u16 skb_currx;
+
+	/* RX Coalescing values */
 	unsigned char rxcoalescing;
 	unsigned short rxcount;
 	unsigned short rxtime;
 
-	/* GFAR addresses */
-	struct rxbd8 *rx_bd_base;	/* Base addresses of Rx and Tx Buffers */
-	struct txbd8 *tx_bd_base;
+	struct rxbd8 *rx_bd_base;	/* First Rx buffers */
 	struct rxbd8 *cur_rx;		/* Next free rx ring entry */
-	struct txbd8 *cur_tx;		/* Next free ring entry */
-	struct txbd8 *dirty_tx;		/* The Ring entry to be freed. */
-	struct gfar __iomem *regs;	/* Pointer to the GFAR memory mapped Registers */
-	u32 __iomem *hash_regs[16];
-	int hash_width;
-	struct net_device_stats stats;	/* linux network statistics */
-	struct gfar_extra_stats extra_stats;
-	spinlock_t lock;
+
+	/* RX parameters */
+	unsigned int rx_ring_size;
 	unsigned int rx_buffer_size;
 	unsigned int rx_stash_size;
 	unsigned int rx_stash_index;
-	unsigned int tx_ring_size;
-	unsigned int rx_ring_size;
+
+	struct vlan_group *vlgrp;
+
+	/* Unprotected fields */
+	/* Pointer to the GFAR memory mapped Registers */
+	struct gfar __iomem *regs;
+
+	/* Hash registers and their width */
+	u32 __iomem *hash_regs[16];
+	int hash_width;
+
+	/* global parameters */
 	unsigned int fifo_threshold;
 	unsigned int fifo_starve;
 	unsigned int fifo_starve_off;
@@ -702,13 +721,15 @@ struct gfar_private {
 			extended_hash:1,
 			bd_stash_en:1;
 	unsigned short padding;
-	struct vlan_group *vlgrp;
-	/* Info structure initialized by board setup code */
+
 	unsigned int interruptTransmit;
 	unsigned int interruptReceive;
 	unsigned int interruptError;
+
+	/* info structure initialized by platform code */
 	struct gianfar_platform_data *einfo;
 
+	/* PHY stuff */
 	struct phy_device *phydev;
 	struct mii_bus *mii_bus;
 	int oldspeed;
@@ -716,6 +737,10 @@ struct gfar_private {
 	int oldlink;
 
 	uint32_t msg_enable;
+
+	/* Network Statistics */
+	struct net_device_stats stats;
+	struct gfar_extra_stats extra_stats;
 };
 
 static inline u32 gfar_read(volatile unsigned __iomem *addr)
diff --git a/drivers/net/gianfar_ethtool.c b/drivers/net/gianfar_ethtool.c
index 5de7b2e259dc..d69698c695ef 100644
--- a/drivers/net/gianfar_ethtool.c
+++ b/drivers/net/gianfar_ethtool.c
@@ -455,10 +455,14 @@ static int gfar_sringparam(struct net_device *dev, struct ethtool_ringparam *rva
 
 	/* Halt TX and RX, and process the frames which
 	 * have already been received */
-	spin_lock_irqsave(&priv->lock, flags);
+	spin_lock_irqsave(&priv->txlock, flags);
+	spin_lock(&priv->rxlock);
+
 	gfar_halt(dev);
 	gfar_clean_rx_ring(dev, priv->rx_ring_size);
-	spin_unlock_irqrestore(&priv->lock, flags);
+
+	spin_unlock(&priv->rxlock);
+	spin_unlock_irqrestore(&priv->txlock, flags);
 
 	/* Now we take down the rings to rebuild them */
 	stop_gfar(dev);
@@ -488,10 +492,14 @@ static int gfar_set_rx_csum(struct net_device *dev, uint32_t data)
 
 	/* Halt TX and RX, and process the frames which
 	 * have already been received */
-	spin_lock_irqsave(&priv->lock, flags);
+	spin_lock_irqsave(&priv->txlock, flags);
+	spin_lock(&priv->rxlock);
+
 	gfar_halt(dev);
 	gfar_clean_rx_ring(dev, priv->rx_ring_size);
-	spin_unlock_irqrestore(&priv->lock, flags);
+
+	spin_unlock(&priv->rxlock);
+	spin_unlock_irqrestore(&priv->txlock, flags);
 
 	/* Now we take down the rings to rebuild them */
 	stop_gfar(dev);
@@ -523,7 +531,7 @@ static int gfar_set_tx_csum(struct net_device *dev, uint32_t data)
 	if (!(priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_CSUM))
 		return -EOPNOTSUPP;
 
-	spin_lock_irqsave(&priv->lock, flags);
+	spin_lock_irqsave(&priv->txlock, flags);
 	gfar_halt(dev);
 
 	if (data)
@@ -532,7 +540,7 @@ static int gfar_set_tx_csum(struct net_device *dev, uint32_t data)
 		dev->features &= ~NETIF_F_IP_CSUM;
 
 	gfar_start(dev);
-	spin_unlock_irqrestore(&priv->lock, flags);
+	spin_unlock_irqrestore(&priv->txlock, flags);
 
 	return 0;
 }
diff --git a/drivers/net/gianfar_sysfs.c b/drivers/net/gianfar_sysfs.c
index 51ef181b1368..a6d5c43199cb 100644
--- a/drivers/net/gianfar_sysfs.c
+++ b/drivers/net/gianfar_sysfs.c
@@ -82,7 +82,7 @@ static ssize_t gfar_set_bd_stash(struct class_device *cdev,
 	else
 		return count;
 
-	spin_lock_irqsave(&priv->lock, flags);
+	spin_lock_irqsave(&priv->rxlock, flags);
 
 	/* Set the new stashing value */
 	priv->bd_stash_en = new_setting;
@@ -96,7 +96,7 @@ static ssize_t gfar_set_bd_stash(struct class_device *cdev,
 
 	gfar_write(&priv->regs->attr, temp);
 
-	spin_unlock_irqrestore(&priv->lock, flags);
+	spin_unlock_irqrestore(&priv->rxlock, flags);
 
 	return count;
 }
@@ -118,7 +118,7 @@ static ssize_t gfar_set_rx_stash_size(struct class_device *cdev,
 	u32 temp;
 	unsigned long flags;
 
-	spin_lock_irqsave(&priv->lock, flags);
+	spin_lock_irqsave(&priv->rxlock, flags);
 	if (length > priv->rx_buffer_size)
 		return count;
 
@@ -142,7 +142,7 @@ static ssize_t gfar_set_rx_stash_size(struct class_device *cdev,
 
 	gfar_write(&priv->regs->attr, temp);
 
-	spin_unlock_irqrestore(&priv->lock, flags);
+	spin_unlock_irqrestore(&priv->rxlock, flags);
 
 	return count;
 }
@@ -166,7 +166,7 @@ static ssize_t gfar_set_rx_stash_index(struct class_device *cdev,
 	u32 temp;
 	unsigned long flags;
 
-	spin_lock_irqsave(&priv->lock, flags);
+	spin_lock_irqsave(&priv->rxlock, flags);
 	if (index > priv->rx_stash_size)
 		return count;
 
@@ -180,7 +180,7 @@ static ssize_t gfar_set_rx_stash_index(struct class_device *cdev,
 	temp |= ATTRELI_EI(index);
 	gfar_write(&priv->regs->attreli, flags);
 
-	spin_unlock_irqrestore(&priv->lock, flags);
+	spin_unlock_irqrestore(&priv->rxlock, flags);
 
 	return count;
 }
@@ -205,7 +205,7 @@ static ssize_t gfar_set_fifo_threshold(struct class_device *cdev,
 	if (length > GFAR_MAX_FIFO_THRESHOLD)
 		return count;
 
-	spin_lock_irqsave(&priv->lock, flags);
+	spin_lock_irqsave(&priv->txlock, flags);
 
 	priv->fifo_threshold = length;
 
@@ -214,7 +214,7 @@ static ssize_t gfar_set_fifo_threshold(struct class_device *cdev,
 	temp |= length;
 	gfar_write(&priv->regs->fifo_tx_thr, temp);
 
-	spin_unlock_irqrestore(&priv->lock, flags);
+	spin_unlock_irqrestore(&priv->txlock, flags);
 
 	return count;
 }
@@ -240,7 +240,7 @@ static ssize_t gfar_set_fifo_starve(struct class_device *cdev,
 	if (num > GFAR_MAX_FIFO_STARVE)
 		return count;
 
-	spin_lock_irqsave(&priv->lock, flags);
+	spin_lock_irqsave(&priv->txlock, flags);
 
 	priv->fifo_starve = num;
 
@@ -249,7 +249,7 @@ static ssize_t gfar_set_fifo_starve(struct class_device *cdev,
 	temp |= num;
 	gfar_write(&priv->regs->fifo_tx_starve, temp);
 
-	spin_unlock_irqrestore(&priv->lock, flags);
+	spin_unlock_irqrestore(&priv->txlock, flags);
 
 	return count;
 }
@@ -274,7 +274,7 @@ static ssize_t gfar_set_fifo_starve_off(struct class_device *cdev,
 	if (num > GFAR_MAX_FIFO_STARVE_OFF)
 		return count;
 
-	spin_lock_irqsave(&priv->lock, flags);
+	spin_lock_irqsave(&priv->txlock, flags);
 
 	priv->fifo_starve_off = num;
 
@@ -283,7 +283,7 @@ static ssize_t gfar_set_fifo_starve_off(struct class_device *cdev,
283 temp |= num; 283 temp |= num;
284 gfar_write(&priv->regs->fifo_tx_starve_shutoff, temp); 284 gfar_write(&priv->regs->fifo_tx_starve_shutoff, temp);
285 285
286 spin_unlock_irqrestore(&priv->lock, flags); 286 spin_unlock_irqrestore(&priv->txlock, flags);
287 287
288 return count; 288 return count;
289} 289}
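The gianfar hunks above replace the single priv->lock with direction-specific rxlock/txlock spinlocks, so RX-stash stores and TX-FIFO stores no longer serialize against each other. A minimal sketch of the pattern, assuming a hypothetical simplified private struct (names other than rxlock/txlock are illustrative, not the driver's real layout; locks are assumed initialized with spin_lock_init() at probe time):

    #include <linux/spinlock.h>

    /* Hypothetical private data: one lock per direction instead of a
     * single lock shared by the RX and TX paths. */
    struct example_gfar_private {
            spinlock_t rxlock;              /* guards RX stash state */
            spinlock_t txlock;              /* guards TX FIFO thresholds */
            unsigned int rx_stash_size;
    };

    static void example_set_rx_stash(struct example_gfar_private *priv,
                                     unsigned int size)
    {
            unsigned long flags;

            /* Only the RX lock is taken; a concurrent TX-threshold
             * update can proceed under txlock in parallel. */
            spin_lock_irqsave(&priv->rxlock, flags);
            priv->rx_stash_size = size;
            spin_unlock_irqrestore(&priv->rxlock, flags);
    }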
diff --git a/drivers/net/hamradio/dmascc.c b/drivers/net/hamradio/dmascc.c
index 79a8fbcf5f93..0d5fccc984bb 100644
--- a/drivers/net/hamradio/dmascc.c
+++ b/drivers/net/hamradio/dmascc.c
@@ -582,7 +582,6 @@ static int __init setup_adapter(int card_base, int type, int n)
582 INIT_WORK(&priv->rx_work, rx_bh, priv); 582 INIT_WORK(&priv->rx_work, rx_bh, priv);
583 dev->priv = priv; 583 dev->priv = priv;
584 sprintf(dev->name, "dmascc%i", 2 * n + i); 584 sprintf(dev->name, "dmascc%i", 2 * n + i);
585 SET_MODULE_OWNER(dev);
586 dev->base_addr = card_base; 585 dev->base_addr = card_base;
587 dev->irq = irq; 586 dev->irq = irq;
588 dev->open = scc_open; 587 dev->open = scc_open;
diff --git a/drivers/net/hamradio/scc.c b/drivers/net/hamradio/scc.c
index 6ace0e914fd1..5927784df3f9 100644
--- a/drivers/net/hamradio/scc.c
+++ b/drivers/net/hamradio/scc.c
@@ -1550,7 +1550,6 @@ static unsigned char ax25_nocall[AX25_ADDR_LEN] =
1550 1550
1551static void scc_net_setup(struct net_device *dev) 1551static void scc_net_setup(struct net_device *dev)
1552{ 1552{
1553 SET_MODULE_OWNER(dev);
1554 dev->tx_queue_len = 16; /* should be enough... */ 1553 dev->tx_queue_len = 16; /* should be enough... */
1555 1554
1556 dev->open = scc_net_open; 1555 dev->open = scc_net_open;
diff --git a/drivers/net/hamradio/yam.c b/drivers/net/hamradio/yam.c
index fe22479eb202..b49884048caa 100644
--- a/drivers/net/hamradio/yam.c
+++ b/drivers/net/hamradio/yam.c
@@ -1098,7 +1098,6 @@ static void yam_setup(struct net_device *dev)
1098 1098
1099 dev->base_addr = yp->iobase; 1099 dev->base_addr = yp->iobase;
1100 dev->irq = yp->irq; 1100 dev->irq = yp->irq;
1101 SET_MODULE_OWNER(dev);
1102 1101
1103 dev->open = yam_open; 1102 dev->open = yam_open;
1104 dev->stop = yam_close; 1103 dev->stop = yam_close;
diff --git a/drivers/net/mv643xx_eth.c b/drivers/net/mv643xx_eth.c
index ea62a3e7d586..411f4d809c47 100644
--- a/drivers/net/mv643xx_eth.c
+++ b/drivers/net/mv643xx_eth.c
@@ -1419,6 +1419,8 @@ static int mv643xx_eth_probe(struct platform_device *pdev)
1419 mv643xx_eth_update_pscr(dev, &cmd); 1419 mv643xx_eth_update_pscr(dev, &cmd);
1420 mv643xx_set_settings(dev, &cmd); 1420 mv643xx_set_settings(dev, &cmd);
1421 1421
1422 SET_MODULE_OWNER(dev);
1423 SET_NETDEV_DEV(dev, &pdev->dev);
1422 err = register_netdev(dev); 1424 err = register_netdev(dev);
1423 if (err) 1425 if (err)
1424 goto out; 1426 goto out;
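The mv643xx hunk adds SET_NETDEV_DEV() immediately before register_netdev(): the sysfs link from a network interface to its parent device is created at registration time, so the parent must be set first. A minimal probe sketch under that assumption (the example_* names are hypothetical):

    #include <linux/etherdevice.h>
    #include <linux/netdevice.h>
    #include <linux/platform_device.h>

    struct example_priv { int dummy; };

    static int example_eth_probe(struct platform_device *pdev)
    {
            struct net_device *dev;
            int err;

            dev = alloc_etherdev(sizeof(struct example_priv));
            if (!dev)
                    return -ENOMEM;

            /* Must precede register_netdev(); registration creates the
             * sysfs "device" symlink from the parent set here. */
            SET_NETDEV_DEV(dev, &pdev->dev);

            err = register_netdev(dev);
            if (err) {
                    free_netdev(dev);
                    return err;
            }
            return 0;
    }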
diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c
index 67b0eab16589..227df9876a2c 100644
--- a/drivers/net/sky2.c
+++ b/drivers/net/sky2.c
@@ -51,7 +51,7 @@
51#include "sky2.h" 51#include "sky2.h"
52 52
53#define DRV_NAME "sky2" 53#define DRV_NAME "sky2"
54#define DRV_VERSION "1.1" 54#define DRV_VERSION "1.2"
55#define PFX DRV_NAME " " 55#define PFX DRV_NAME " "
56 56
57/* 57/*
@@ -925,8 +925,7 @@ static inline struct sk_buff *sky2_alloc_skb(unsigned int size, gfp_t gfp_mask)
925 skb = alloc_skb(size + RX_SKB_ALIGN, gfp_mask); 925 skb = alloc_skb(size + RX_SKB_ALIGN, gfp_mask);
926 if (likely(skb)) { 926 if (likely(skb)) {
927 unsigned long p = (unsigned long) skb->data; 927 unsigned long p = (unsigned long) skb->data;
928 skb_reserve(skb, 928 skb_reserve(skb, ALIGN(p, RX_SKB_ALIGN) - p);
929 ((p + RX_SKB_ALIGN - 1) & ~(RX_SKB_ALIGN - 1)) - p);
930 } 929 }
931 930
932 return skb; 931 return skb;
@@ -1686,13 +1685,12 @@ static void sky2_tx_timeout(struct net_device *dev)
1686} 1685}
1687 1686
1688 1687
1689#define roundup(x, y) ((((x)+((y)-1))/(y))*(y))
1690/* Want receive buffer size to be a multiple of 64 bits 1688/* Want receive buffer size to be a multiple of 64 bits
1691 * and include room for VLAN and truncation 1689 * and include room for VLAN and truncation
1692 */ 1690 */
1693static inline unsigned sky2_buf_size(int mtu) 1691static inline unsigned sky2_buf_size(int mtu)
1694{ 1692{
1695 return roundup(mtu + ETH_HLEN + VLAN_HLEN, 8) + 8; 1693 return ALIGN(mtu + ETH_HLEN + VLAN_HLEN, 8) + 8;
1696} 1694}
1697 1695
1698static int sky2_change_mtu(struct net_device *dev, int new_mtu) 1696static int sky2_change_mtu(struct net_device *dev, int new_mtu)
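Both sky2 hunks above drop open-coded round-up arithmetic (and a private roundup macro) in favor of the kernel's ALIGN() macro, which rounds a value up to the next multiple of a power of two; the alignment here is 8, so ALIGN()'s power-of-two restriction holds. The equivalence, as a sketch:

    #include <linux/kernel.h>           /* ALIGN() */

    /* For power-of-two a, ALIGN(x, a) equals the open-coded form the
     * hunks remove: ALIGN(1501, 8) == 1504, ALIGN(1504, 8) == 1504. */
    static inline unsigned long example_align_up(unsigned long x,
                                                 unsigned long a)
    {
            return (x + a - 1) & ~(a - 1);  /* same result as ALIGN(x, a) */
    }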
@@ -2086,6 +2084,20 @@ static void sky2_descriptor_error(struct sky2_hw *hw, unsigned port,
2086 } 2084 }
2087} 2085}
2088 2086
2087/* If idle then force a fake soft NAPI poll once a second
 2088 * to work around cases where an edge-triggered interrupt is shared.
2089 */
2090static void sky2_idle(unsigned long arg)
2091{
2092 struct net_device *dev = (struct net_device *) arg;
2093
2094 local_irq_disable();
2095 if (__netif_rx_schedule_prep(dev))
2096 __netif_rx_schedule(dev);
2097 local_irq_enable();
2098}
2099
2100
2089static int sky2_poll(struct net_device *dev0, int *budget) 2101static int sky2_poll(struct net_device *dev0, int *budget)
2090{ 2102{
2091 struct sky2_hw *hw = ((struct sky2_port *) netdev_priv(dev0))->hw; 2103 struct sky2_hw *hw = ((struct sky2_port *) netdev_priv(dev0))->hw;
@@ -2093,6 +2105,7 @@ static int sky2_poll(struct net_device *dev0, int *budget)
2093 int work_done = 0; 2105 int work_done = 0;
2094 u32 status = sky2_read32(hw, B0_Y2_SP_EISR); 2106 u32 status = sky2_read32(hw, B0_Y2_SP_EISR);
2095 2107
2108 restart_poll:
2096 if (unlikely(status & ~Y2_IS_STAT_BMU)) { 2109 if (unlikely(status & ~Y2_IS_STAT_BMU)) {
2097 if (status & Y2_IS_HW_ERR) 2110 if (status & Y2_IS_HW_ERR)
2098 sky2_hw_intr(hw); 2111 sky2_hw_intr(hw);
@@ -2123,7 +2136,7 @@ static int sky2_poll(struct net_device *dev0, int *budget)
2123 } 2136 }
2124 2137
2125 if (status & Y2_IS_STAT_BMU) { 2138 if (status & Y2_IS_STAT_BMU) {
2126 work_done = sky2_status_intr(hw, work_limit); 2139 work_done += sky2_status_intr(hw, work_limit - work_done);
2127 *budget -= work_done; 2140 *budget -= work_done;
2128 dev0->quota -= work_done; 2141 dev0->quota -= work_done;
2129 2142
@@ -2133,9 +2146,24 @@ static int sky2_poll(struct net_device *dev0, int *budget)
2133 sky2_write32(hw, STAT_CTRL, SC_STAT_CLR_IRQ); 2146 sky2_write32(hw, STAT_CTRL, SC_STAT_CLR_IRQ);
2134 } 2147 }
2135 2148
2136 netif_rx_complete(dev0); 2149 mod_timer(&hw->idle_timer, jiffies + HZ);
2150
2151 local_irq_disable();
2152 __netif_rx_complete(dev0);
2137 2153
2138 status = sky2_read32(hw, B0_Y2_SP_LISR); 2154 status = sky2_read32(hw, B0_Y2_SP_LISR);
2155
2156 if (unlikely(status)) {
 2157 /* More work pending, try to keep going */
2158 if (__netif_rx_schedule_prep(dev0)) {
2159 __netif_rx_reschedule(dev0, work_done);
2160 status = sky2_read32(hw, B0_Y2_SP_EISR);
2161 local_irq_enable();
2162 goto restart_poll;
2163 }
2164 }
2165
2166 local_irq_enable();
2139 return 0; 2167 return 0;
2140} 2168}
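The idle timer and the restart_poll path added above are two halves of the same workaround for a missed shared edge-triggered interrupt: the poll routine re-reads the latched status after completing and reschedules itself instead of returning, and as a backstop a once-a-second timer fakes an interrupt by scheduling NAPI by hand. A sketch of the timer half, paraphrasing the sky2_idle() hunk:

    #include <linux/interrupt.h>
    #include <linux/netdevice.h>
    #include <linux/timer.h>

    /* Backstop: once a second, schedule a NAPI poll as if an interrupt
     * had fired, in case a shared edge-triggered IRQ was swallowed. */
    static void example_idle(unsigned long arg)
    {
            struct net_device *dev = (struct net_device *)arg;

            local_irq_disable();
            if (__netif_rx_schedule_prep(dev))  /* not already queued? */
                    __netif_rx_schedule(dev);
            local_irq_enable();
    }

The probe hunk arms the timer with setup_timer(), the poll routine re-arms it with mod_timer(&hw->idle_timer, jiffies + HZ), and the remove hunk calls del_timer_sync() before unregistering so the callback cannot run against a freed device.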
2141 2169
@@ -2153,8 +2181,6 @@ static irqreturn_t sky2_intr(int irq, void *dev_id, struct pt_regs *regs)
2153 prefetch(&hw->st_le[hw->st_idx]); 2181 prefetch(&hw->st_le[hw->st_idx]);
2154 if (likely(__netif_rx_schedule_prep(dev0))) 2182 if (likely(__netif_rx_schedule_prep(dev0)))
2155 __netif_rx_schedule(dev0); 2183 __netif_rx_schedule(dev0);
2156 else
2157 printk(KERN_DEBUG PFX "irq race detected\n");
2158 2184
2159 return IRQ_HANDLED; 2185 return IRQ_HANDLED;
2160} 2186}
@@ -2193,7 +2219,7 @@ static inline u32 sky2_clk2us(const struct sky2_hw *hw, u32 clk)
2193} 2219}
2194 2220
2195 2221
2196static int sky2_reset(struct sky2_hw *hw) 2222static int __devinit sky2_reset(struct sky2_hw *hw)
2197{ 2223{
2198 u16 status; 2224 u16 status;
2199 u8 t8, pmd_type; 2225 u8 t8, pmd_type;
@@ -3276,6 +3302,8 @@ static int __devinit sky2_probe(struct pci_dev *pdev,
3276 3302
3277 sky2_write32(hw, B0_IMSK, Y2_IS_BASE); 3303 sky2_write32(hw, B0_IMSK, Y2_IS_BASE);
3278 3304
3305 setup_timer(&hw->idle_timer, sky2_idle, (unsigned long) dev);
3306
3279 pci_set_drvdata(pdev, hw); 3307 pci_set_drvdata(pdev, hw);
3280 3308
3281 return 0; 3309 return 0;
@@ -3311,13 +3339,15 @@ static void __devexit sky2_remove(struct pci_dev *pdev)
3311 if (!hw) 3339 if (!hw)
3312 return; 3340 return;
3313 3341
3342 del_timer_sync(&hw->idle_timer);
3343
3344 sky2_write32(hw, B0_IMSK, 0);
3314 dev0 = hw->dev[0]; 3345 dev0 = hw->dev[0];
3315 dev1 = hw->dev[1]; 3346 dev1 = hw->dev[1];
3316 if (dev1) 3347 if (dev1)
3317 unregister_netdev(dev1); 3348 unregister_netdev(dev1);
3318 unregister_netdev(dev0); 3349 unregister_netdev(dev0);
3319 3350
3320 sky2_write32(hw, B0_IMSK, 0);
3321 sky2_set_power_state(hw, PCI_D3hot); 3351 sky2_set_power_state(hw, PCI_D3hot);
3322 sky2_write16(hw, B0_Y2LED, LED_STAT_OFF); 3352 sky2_write16(hw, B0_Y2LED, LED_STAT_OFF);
3323 sky2_write8(hw, B0_CTST, CS_RST_SET); 3353 sky2_write8(hw, B0_CTST, CS_RST_SET);
diff --git a/drivers/net/sky2.h b/drivers/net/sky2.h
index 89dd18cd12f0..b026f5653f04 100644
--- a/drivers/net/sky2.h
+++ b/drivers/net/sky2.h
@@ -1880,6 +1880,8 @@ struct sky2_hw {
1880 struct sky2_status_le *st_le; 1880 struct sky2_status_le *st_le;
1881 u32 st_idx; 1881 u32 st_idx;
1882 dma_addr_t st_dma; 1882 dma_addr_t st_dma;
1883
1884 struct timer_list idle_timer;
1883 int msi_detected; 1885 int msi_detected;
1884 wait_queue_head_t msi_wait; 1886 wait_queue_head_t msi_wait;
1885}; 1887};
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index 73e271e59c6a..beeb612be98f 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -69,8 +69,8 @@
69 69
70#define DRV_MODULE_NAME "tg3" 70#define DRV_MODULE_NAME "tg3"
71#define PFX DRV_MODULE_NAME ": " 71#define PFX DRV_MODULE_NAME ": "
72#define DRV_MODULE_VERSION "3.56" 72#define DRV_MODULE_VERSION "3.57"
73#define DRV_MODULE_RELDATE "Apr 1, 2006" 73#define DRV_MODULE_RELDATE "Apr 28, 2006"
74 74
75#define TG3_DEF_MAC_MODE 0 75#define TG3_DEF_MAC_MODE 0
76#define TG3_DEF_RX_MODE 0 76#define TG3_DEF_RX_MODE 0
@@ -974,6 +974,8 @@ static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
974 return err; 974 return err;
975} 975}
976 976
977static void tg3_link_report(struct tg3 *);
978
977/* This will reset the tigon3 PHY if there is no valid 979/* This will reset the tigon3 PHY if there is no valid
978 * link unless the FORCE argument is non-zero. 980 * link unless the FORCE argument is non-zero.
979 */ 981 */
@@ -987,6 +989,11 @@ static int tg3_phy_reset(struct tg3 *tp)
987 if (err != 0) 989 if (err != 0)
988 return -EBUSY; 990 return -EBUSY;
989 991
992 if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
993 netif_carrier_off(tp->dev);
994 tg3_link_report(tp);
995 }
996
990 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 || 997 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
991 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 || 998 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
992 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) { 999 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
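The block added to tg3_phy_reset() above deliberately reports link loss before the reset: the PHY reset will drop the link anyway, so taking the carrier down first keeps the stack's view (and the tg3_link_report() log line) in step with the hardware while autonegotiation restarts. Reduced to its core, with the driver-specific logging elided:

    #include <linux/netdevice.h>

    /* Take the carrier down ahead of an operation known to drop the
     * link, so watchdogs and userspace see the transition promptly. */
    static void example_report_link_drop(struct net_device *dev)
    {
            if (netif_running(dev) && netif_carrier_ok(dev))
                    netif_carrier_off(dev); /* driver would also log here */
    }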
@@ -1023,6 +1030,12 @@ out:
1023 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x14e2); 1030 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x14e2);
1024 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400); 1031 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
1025 } 1032 }
1033 else if (tp->tg3_flags2 & TG3_FLG2_PHY_JITTER_BUG) {
1034 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
1035 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
1036 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
1037 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
1038 }
1026 /* Set Extended packet length bit (bit 14) on all chips that */ 1039 /* Set Extended packet length bit (bit 14) on all chips that */
1027 /* support jumbo frames */ 1040 /* support jumbo frames */
1028 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) { 1041 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
@@ -3531,7 +3544,7 @@ static irqreturn_t tg3_test_isr(int irq, void *dev_id,
3531 return IRQ_RETVAL(0); 3544 return IRQ_RETVAL(0);
3532} 3545}
3533 3546
3534static int tg3_init_hw(struct tg3 *); 3547static int tg3_init_hw(struct tg3 *, int);
3535static int tg3_halt(struct tg3 *, int, int); 3548static int tg3_halt(struct tg3 *, int, int);
3536 3549
3537#ifdef CONFIG_NET_POLL_CONTROLLER 3550#ifdef CONFIG_NET_POLL_CONTROLLER
@@ -3567,7 +3580,7 @@ static void tg3_reset_task(void *_data)
3567 tp->tg3_flags2 &= ~TG3_FLG2_RESTART_TIMER; 3580 tp->tg3_flags2 &= ~TG3_FLG2_RESTART_TIMER;
3568 3581
3569 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0); 3582 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
3570 tg3_init_hw(tp); 3583 tg3_init_hw(tp, 1);
3571 3584
3572 tg3_netif_start(tp); 3585 tg3_netif_start(tp);
3573 3586
@@ -4042,7 +4055,7 @@ static int tg3_change_mtu(struct net_device *dev, int new_mtu)
4042 4055
4043 tg3_set_mtu(dev, tp, new_mtu); 4056 tg3_set_mtu(dev, tp, new_mtu);
4044 4057
4045 tg3_init_hw(tp); 4058 tg3_init_hw(tp, 0);
4046 4059
4047 tg3_netif_start(tp); 4060 tg3_netif_start(tp);
4048 4061
@@ -5719,9 +5732,23 @@ static int tg3_set_mac_addr(struct net_device *dev, void *p)
5719 if (!netif_running(dev)) 5732 if (!netif_running(dev))
5720 return 0; 5733 return 0;
5721 5734
5722 spin_lock_bh(&tp->lock); 5735 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
5723 __tg3_set_mac_addr(tp); 5736 /* Reset chip so that ASF can re-init any MAC addresses it
5724 spin_unlock_bh(&tp->lock); 5737 * needs.
5738 */
5739 tg3_netif_stop(tp);
5740 tg3_full_lock(tp, 1);
5741
5742 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
5743 tg3_init_hw(tp, 0);
5744
5745 tg3_netif_start(tp);
5746 tg3_full_unlock(tp);
5747 } else {
5748 spin_lock_bh(&tp->lock);
5749 __tg3_set_mac_addr(tp);
5750 spin_unlock_bh(&tp->lock);
5751 }
5725 5752
5726 return 0; 5753 return 0;
5727} 5754}
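The rewritten tg3_set_mac_addr() above picks one of two paths: with ASF firmware enabled, the whole chip is halted and re-initialized, because the firmware keeps its own copy of the MAC address and only re-reads it during hardware init; without ASF, a locked register update suffices. A hedged sketch of that shape, with hypothetical example_* types and helpers standing in for the tg3 internals:

    #include <linux/spinlock.h>
    #include <linux/string.h>
    #include <linux/types.h>

    struct example_nic {
            spinlock_t lock;
            int firmware_managed;   /* stands in for TG3_FLAG_ENABLE_ASF */
            u8 mac[6];
    };

    static void example_write_mac_regs(struct example_nic *nic) { /* MMIO */ }
    static void example_restart(struct example_nic *nic) { /* halt + init */ }

    static int example_set_mac(struct example_nic *nic, const u8 *mac)
    {
            memcpy(nic->mac, mac, sizeof(nic->mac));

            if (nic->firmware_managed) {
                    /* Firmware caches MAC addresses; only a full restart
                     * makes it pick up the new one. */
                    example_restart(nic);
            } else {
                    spin_lock_bh(&nic->lock);
                    example_write_mac_regs(nic);
                    spin_unlock_bh(&nic->lock);
            }
            return 0;
    }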
@@ -5771,7 +5798,7 @@ static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
5771} 5798}
5772 5799
5773/* tp->lock is held. */ 5800/* tp->lock is held. */
5774static int tg3_reset_hw(struct tg3 *tp) 5801static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
5775{ 5802{
5776 u32 val, rdmac_mode; 5803 u32 val, rdmac_mode;
5777 int i, err, limit; 5804 int i, err, limit;
@@ -5786,7 +5813,7 @@ static int tg3_reset_hw(struct tg3 *tp)
5786 tg3_abort_hw(tp, 1); 5813 tg3_abort_hw(tp, 1);
5787 } 5814 }
5788 5815
5789 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) 5816 if ((tp->tg3_flags2 & TG3_FLG2_MII_SERDES) && reset_phy)
5790 tg3_phy_reset(tp); 5817 tg3_phy_reset(tp);
5791 5818
5792 err = tg3_chip_reset(tp); 5819 err = tg3_chip_reset(tp);
@@ -6327,7 +6354,7 @@ static int tg3_reset_hw(struct tg3 *tp)
6327 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl); 6354 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
6328 } 6355 }
6329 6356
6330 err = tg3_setup_phy(tp, 1); 6357 err = tg3_setup_phy(tp, reset_phy);
6331 if (err) 6358 if (err)
6332 return err; 6359 return err;
6333 6360
@@ -6400,7 +6427,7 @@ static int tg3_reset_hw(struct tg3 *tp)
6400/* Called at device open time to get the chip ready for 6427/* Called at device open time to get the chip ready for
6401 * packet processing. Invoked with tp->lock held. 6428 * packet processing. Invoked with tp->lock held.
6402 */ 6429 */
6403static int tg3_init_hw(struct tg3 *tp) 6430static int tg3_init_hw(struct tg3 *tp, int reset_phy)
6404{ 6431{
6405 int err; 6432 int err;
6406 6433
@@ -6413,7 +6440,7 @@ static int tg3_init_hw(struct tg3 *tp)
6413 6440
6414 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0); 6441 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
6415 6442
6416 err = tg3_reset_hw(tp); 6443 err = tg3_reset_hw(tp, reset_phy);
6417 6444
6418out: 6445out:
6419 return err; 6446 return err;
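The reset_phy flag threaded through tg3_init_hw() and tg3_reset_hw() above lets each caller decide whether the PHY is disturbed: paths that merely reconfigure (the MTU change passes 0) keep the link up, while open, resume, self-test and error recovery pass 1 and accept a renegotiation. The shape of the threading, with hypothetical example_* helpers:

    struct example_nic;         /* opaque in this sketch */

    static void example_phy_reset(struct example_nic *nic) { /* drops link */ }
    static int example_chip_reset(struct example_nic *nic) { return 0; }

    static int example_reset_hw(struct example_nic *nic, int reset_phy)
    {
            if (reset_phy)
                    example_phy_reset(nic); /* link flap, renegotiate */
            return example_chip_reset(nic); /* MAC/DMA reinit only */
    }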
@@ -6683,7 +6710,7 @@ static int tg3_test_msi(struct tg3 *tp)
6683 tg3_full_lock(tp, 1); 6710 tg3_full_lock(tp, 1);
6684 6711
6685 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); 6712 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
6686 err = tg3_init_hw(tp); 6713 err = tg3_init_hw(tp, 1);
6687 6714
6688 tg3_full_unlock(tp); 6715 tg3_full_unlock(tp);
6689 6716
@@ -6748,7 +6775,7 @@ static int tg3_open(struct net_device *dev)
6748 6775
6749 tg3_full_lock(tp, 0); 6776 tg3_full_lock(tp, 0);
6750 6777
6751 err = tg3_init_hw(tp); 6778 err = tg3_init_hw(tp, 1);
6752 if (err) { 6779 if (err) {
6753 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); 6780 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
6754 tg3_free_rings(tp); 6781 tg3_free_rings(tp);
@@ -7839,7 +7866,7 @@ static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *e
7839 7866
7840 if (netif_running(dev)) { 7867 if (netif_running(dev)) {
7841 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); 7868 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
7842 tg3_init_hw(tp); 7869 tg3_init_hw(tp, 1);
7843 tg3_netif_start(tp); 7870 tg3_netif_start(tp);
7844 } 7871 }
7845 7872
@@ -7884,7 +7911,7 @@ static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam
7884 7911
7885 if (netif_running(dev)) { 7912 if (netif_running(dev)) {
7886 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); 7913 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
7887 tg3_init_hw(tp); 7914 tg3_init_hw(tp, 1);
7888 tg3_netif_start(tp); 7915 tg3_netif_start(tp);
7889 } 7916 }
7890 7917
@@ -8522,7 +8549,7 @@ static int tg3_test_loopback(struct tg3 *tp)
8522 if (!netif_running(tp->dev)) 8549 if (!netif_running(tp->dev))
8523 return TG3_LOOPBACK_FAILED; 8550 return TG3_LOOPBACK_FAILED;
8524 8551
8525 tg3_reset_hw(tp); 8552 tg3_reset_hw(tp, 1);
8526 8553
8527 if (tg3_run_loopback(tp, TG3_MAC_LOOPBACK)) 8554 if (tg3_run_loopback(tp, TG3_MAC_LOOPBACK))
8528 err |= TG3_MAC_LOOPBACK_FAILED; 8555 err |= TG3_MAC_LOOPBACK_FAILED;
@@ -8596,7 +8623,7 @@ static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
8596 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); 8623 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
8597 if (netif_running(dev)) { 8624 if (netif_running(dev)) {
8598 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE; 8625 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
8599 tg3_init_hw(tp); 8626 tg3_init_hw(tp, 1);
8600 tg3_netif_start(tp); 8627 tg3_netif_start(tp);
8601 } 8628 }
8602 8629
@@ -9377,7 +9404,7 @@ static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
9377 9404
9378 if ((page_off == 0) || (i == 0)) 9405 if ((page_off == 0) || (i == 0))
9379 nvram_cmd |= NVRAM_CMD_FIRST; 9406 nvram_cmd |= NVRAM_CMD_FIRST;
9380 else if (page_off == (tp->nvram_pagesize - 4)) 9407 if (page_off == (tp->nvram_pagesize - 4))
9381 nvram_cmd |= NVRAM_CMD_LAST; 9408 nvram_cmd |= NVRAM_CMD_LAST;
9382 9409
9383 if (i == (len - 4)) 9410 if (i == (len - 4))
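The one-character change above (dropping the else) matters when a buffered NVRAM write starts mid-page: its first word (i == 0) can also be the last word of that page (page_off == pagesize - 4), and such a word must carry both the FIRST and the LAST command flag. With else if, LAST could never combine with FIRST. Illustrated with hypothetical flag names:

    #include <linux/types.h>

    #define EX_CMD_FIRST    0x1
    #define EX_CMD_LAST     0x2

    /* Independent tests, so a word that both opens the transfer and
     * closes its page gets EX_CMD_FIRST | EX_CMD_LAST. */
    static u32 example_nvram_flags(u32 page_off, u32 pagesize, u32 i)
    {
            u32 cmd = 0;

            if (page_off == 0 || i == 0)
                    cmd |= EX_CMD_FIRST;
            if (page_off == pagesize - 4)   /* no "else" here */
                    cmd |= EX_CMD_LAST;
            return cmd;
    }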
@@ -10353,10 +10380,13 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
10353 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) 10380 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
10354 tp->tg3_flags2 |= TG3_FLG2_PHY_5704_A0_BUG; 10381 tp->tg3_flags2 |= TG3_FLG2_PHY_5704_A0_BUG;
10355 10382
10356 if ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) && 10383 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
10357 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5755) && 10384 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
10358 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5787)) 10385 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
10359 tp->tg3_flags2 |= TG3_FLG2_PHY_BER_BUG; 10386 tp->tg3_flags2 |= TG3_FLG2_PHY_JITTER_BUG;
10387 else
10388 tp->tg3_flags2 |= TG3_FLG2_PHY_BER_BUG;
10389 }
10360 10390
10361 tp->coalesce_mode = 0; 10391 tp->coalesce_mode = 0;
10362 if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX && 10392 if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
@@ -11569,7 +11599,7 @@ static int tg3_suspend(struct pci_dev *pdev, pm_message_t state)
11569 tg3_full_lock(tp, 0); 11599 tg3_full_lock(tp, 0);
11570 11600
11571 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE; 11601 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
11572 tg3_init_hw(tp); 11602 tg3_init_hw(tp, 1);
11573 11603
11574 tp->timer.expires = jiffies + tp->timer_offset; 11604 tp->timer.expires = jiffies + tp->timer_offset;
11575 add_timer(&tp->timer); 11605 add_timer(&tp->timer);
@@ -11603,7 +11633,7 @@ static int tg3_resume(struct pci_dev *pdev)
11603 tg3_full_lock(tp, 0); 11633 tg3_full_lock(tp, 0);
11604 11634
11605 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE; 11635 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
11606 tg3_init_hw(tp); 11636 tg3_init_hw(tp, 1);
11607 11637
11608 tp->timer.expires = jiffies + tp->timer_offset; 11638 tp->timer.expires = jiffies + tp->timer_offset;
11609 add_timer(&tp->timer); 11639 add_timer(&tp->timer);
diff --git a/drivers/net/tg3.h b/drivers/net/tg3.h
index 8c8b987d1250..0e29b885d449 100644
--- a/drivers/net/tg3.h
+++ b/drivers/net/tg3.h
@@ -2215,6 +2215,7 @@ struct tg3 {
2215#define TG3_FLG2_HW_TSO_2 0x08000000 2215#define TG3_FLG2_HW_TSO_2 0x08000000
2216#define TG3_FLG2_HW_TSO (TG3_FLG2_HW_TSO_1 | TG3_FLG2_HW_TSO_2) 2216#define TG3_FLG2_HW_TSO (TG3_FLG2_HW_TSO_1 | TG3_FLG2_HW_TSO_2)
2217#define TG3_FLG2_1SHOT_MSI 0x10000000 2217#define TG3_FLG2_1SHOT_MSI 0x10000000
2218#define TG3_FLG2_PHY_JITTER_BUG 0x20000000
2218 2219
2219 u32 split_mode_max_reqs; 2220 u32 split_mode_max_reqs;
2220#define SPLIT_MODE_5704_MAX_REQ 3 2221#define SPLIT_MODE_5704_MAX_REQ 3
diff --git a/drivers/net/via-rhine.c b/drivers/net/via-rhine.c
index 6a23964c1317..a6dc53b4250d 100644
--- a/drivers/net/via-rhine.c
+++ b/drivers/net/via-rhine.c
@@ -129,6 +129,7 @@
129 - Massive clean-up 129 - Massive clean-up
130 - Rewrite PHY, media handling (remove options, full_duplex, backoff) 130 - Rewrite PHY, media handling (remove options, full_duplex, backoff)
131 - Fix Tx engine race for good 131 - Fix Tx engine race for good
132 - Craig Brind: Zero padded aligned buffers for short packets.
132 133
133*/ 134*/
134 135
@@ -1326,7 +1327,12 @@ static int rhine_start_tx(struct sk_buff *skb, struct net_device *dev)
1326 rp->stats.tx_dropped++; 1327 rp->stats.tx_dropped++;
1327 return 0; 1328 return 0;
1328 } 1329 }
1330
1331 /* Padding is not copied and so must be redone. */
1329 skb_copy_and_csum_dev(skb, rp->tx_buf[entry]); 1332 skb_copy_and_csum_dev(skb, rp->tx_buf[entry]);
1333 if (skb->len < ETH_ZLEN)
1334 memset(rp->tx_buf[entry] + skb->len, 0,
1335 ETH_ZLEN - skb->len);
1330 rp->tx_skbuff_dma[entry] = 0; 1336 rp->tx_skbuff_dma[entry] = 0;
1331 rp->tx_ring[entry].addr = cpu_to_le32(rp->tx_bufs_dma + 1337 rp->tx_ring[entry].addr = cpu_to_le32(rp->tx_bufs_dma +
1332 (rp->tx_buf[entry] - 1338 (rp->tx_buf[entry] -
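The via-rhine fix above closes an information leak: skb_copy_and_csum_dev() copies only skb->len bytes into the driver's aligned bounce buffer, so for frames shorter than the 60-byte Ethernet minimum (ETH_ZLEN) the padding sent on the wire was whatever the buffer held from an earlier packet. Zeroing the tail fixes that; reduced to a helper:

    #include <linux/if_ether.h>         /* ETH_ZLEN */
    #include <linux/skbuff.h>
    #include <linux/string.h>

    /* Copy a frame into a bounce buffer and zero the pad bytes up to
     * the Ethernet minimum, so stale buffer contents never leak. */
    static void example_copy_padded(struct sk_buff *skb, u8 *buf)
    {
            skb_copy_and_csum_dev(skb, buf);        /* copies skb->len bytes */
            if (skb->len < ETH_ZLEN)
                    memset(buf + skb->len, 0, ETH_ZLEN - skb->len);
    }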