-rw-r--r--  drivers/net/3c59x.c                 |  2
-rw-r--r--  drivers/net/forcedeth.c             | 27
-rw-r--r--  drivers/net/fs_enet/fs_enet-main.c  | 28
-rw-r--r--  drivers/net/fs_enet/fs_enet.h       |  1
-rw-r--r--  drivers/net/gianfar.c               |  4
-rw-r--r--  drivers/net/gianfar.h               |  1
-rw-r--r--  drivers/net/ibm_newemac/mal.c       |  2
-rw-r--r--  drivers/net/pcnet32.c               | 58
-rw-r--r--  drivers/net/sky2.c                  | 46
-rw-r--r--  include/linux/netdevice.h           | 18
10 files changed, 108 insertions(+), 79 deletions(-)
diff --git a/drivers/net/3c59x.c b/drivers/net/3c59x.c
index 862f47223fd..6f8e7d4cf74 100644
--- a/drivers/net/3c59x.c
+++ b/drivers/net/3c59x.c
@@ -1491,7 +1491,7 @@ vortex_up(struct net_device *dev)
 	struct vortex_private *vp = netdev_priv(dev);
 	void __iomem *ioaddr = vp->ioaddr;
 	unsigned int config;
-	int i, mii_reg1, mii_reg5, err;
+	int i, mii_reg1, mii_reg5, err = 0;
 
 	if (VORTEX_PCI(vp)) {
 		pci_set_power_state(VORTEX_PCI(vp), PCI_D0);	/* Go active */
diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c
index cfbb7aacfe9..70ddf1acfd8 100644
--- a/drivers/net/forcedeth.c
+++ b/drivers/net/forcedeth.c
@@ -992,7 +992,7 @@ static void nv_enable_irq(struct net_device *dev)
 		if (np->msi_flags & NV_MSI_X_ENABLED)
 			enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
 		else
-			enable_irq(dev->irq);
+			enable_irq(np->pci_dev->irq);
 	} else {
 		enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
 		enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
@@ -1008,7 +1008,7 @@ static void nv_disable_irq(struct net_device *dev)
 		if (np->msi_flags & NV_MSI_X_ENABLED)
 			disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
 		else
-			disable_irq(dev->irq);
+			disable_irq(np->pci_dev->irq);
 	} else {
 		disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
 		disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
@@ -1607,7 +1607,7 @@ static void nv_do_rx_refill(unsigned long data)
 		if (np->msi_flags & NV_MSI_X_ENABLED)
 			disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
 		else
-			disable_irq(dev->irq);
+			disable_irq(np->pci_dev->irq);
 	} else {
 		disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
 	}
@@ -1625,7 +1625,7 @@ static void nv_do_rx_refill(unsigned long data)
 		if (np->msi_flags & NV_MSI_X_ENABLED)
 			enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
 		else
-			enable_irq(dev->irq);
+			enable_irq(np->pci_dev->irq);
 	} else {
 		enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
 	}
@@ -2408,13 +2408,13 @@ static int nv_rx_process_optimized(struct net_device *dev, int limit)
 	struct fe_priv *np = netdev_priv(dev);
 	u32 flags;
 	u32 vlanflags = 0;
-	u32 rx_processed_cnt = 0;
+	int rx_work = 0;
 	struct sk_buff *skb;
 	int len;
 
 	while((np->get_rx.ex != np->put_rx.ex) &&
 	      !((flags = le32_to_cpu(np->get_rx.ex->flaglen)) & NV_RX2_AVAIL) &&
-	      (rx_processed_cnt++ < limit)) {
+	      (rx_work < limit)) {
 
 		dprintk(KERN_DEBUG "%s: nv_rx_process_optimized: flags 0x%x.\n",
 			dev->name, flags);
@@ -2517,9 +2517,11 @@ next_pkt:
 			np->get_rx.ex = np->first_rx.ex;
 		if (unlikely(np->get_rx_ctx++ == np->last_rx_ctx))
 			np->get_rx_ctx = np->first_rx_ctx;
+
+		rx_work++;
 	}
 
-	return rx_processed_cnt;
+	return rx_work;
 }
 
 static void set_bufsize(struct net_device *dev)
@@ -3558,10 +3560,12 @@ static int nv_request_irq(struct net_device *dev, int intr_test)
 	if (ret != 0 && np->msi_flags & NV_MSI_CAPABLE) {
 		if ((ret = pci_enable_msi(np->pci_dev)) == 0) {
 			np->msi_flags |= NV_MSI_ENABLED;
+			dev->irq = np->pci_dev->irq;
 			if (request_irq(np->pci_dev->irq, handler, IRQF_SHARED, dev->name, dev) != 0) {
 				printk(KERN_INFO "forcedeth: request_irq failed %d\n", ret);
 				pci_disable_msi(np->pci_dev);
 				np->msi_flags &= ~NV_MSI_ENABLED;
+				dev->irq = np->pci_dev->irq;
 				goto out_err;
 			}
 
@@ -3624,7 +3628,7 @@ static void nv_do_nic_poll(unsigned long data)
 		if (np->msi_flags & NV_MSI_X_ENABLED)
 			disable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
 		else
-			disable_irq_lockdep(dev->irq);
+			disable_irq_lockdep(np->pci_dev->irq);
 		mask = np->irqmask;
 	} else {
 		if (np->nic_poll_irq & NVREG_IRQ_RX_ALL) {
@@ -3642,6 +3646,8 @@ static void nv_do_nic_poll(unsigned long data)
 	}
 	np->nic_poll_irq = 0;
 
+	/* disable_irq() contains synchronize_irq, thus no irq handler can run now */
+
 	if (np->recover_error) {
 		np->recover_error = 0;
 		printk(KERN_INFO "forcedeth: MAC in recoverable error state\n");
@@ -3678,7 +3684,6 @@ static void nv_do_nic_poll(unsigned long data)
 		}
 	}
 
-	/* FIXME: Do we need synchronize_irq(dev->irq) here? */
 
 	writel(mask, base + NvRegIrqMask);
 	pci_push(base);
@@ -3691,7 +3696,7 @@ static void nv_do_nic_poll(unsigned long data)
 		if (np->msi_flags & NV_MSI_X_ENABLED)
 			enable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
 		else
-			enable_irq_lockdep(dev->irq);
+			enable_irq_lockdep(np->pci_dev->irq);
 	} else {
 		if (np->nic_poll_irq & NVREG_IRQ_RX_ALL) {
 			nv_nic_irq_rx(0, dev);
@@ -4948,7 +4953,7 @@ static int nv_close(struct net_device *dev)
 #ifdef CONFIG_FORCEDETH_NAPI
 	napi_disable(&np->napi);
 #endif
-	synchronize_irq(dev->irq);
+	synchronize_irq(np->pci_dev->irq);
 
 	del_timer_sync(&np->oom_kick);
 	del_timer_sync(&np->nic_poll);
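A note on the forcedeth conversions above: every enable_irq()/disable_irq()/synchronize_irq() call now uses np->pci_dev->irq rather than dev->irq, because pci_enable_msi() can rewrite the device's vector after dev->irq was first recorded, and the nv_request_irq() hunk re-syncs dev->irq for the same reason. A minimal sketch of that pattern, assuming kernel context (nv_request_irq_sketch() is an illustrative name, not driver code):

#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/pci.h>

/* Sketch: request the vector the PCI core currently reports and keep
 * dev->irq in sync with it; always act on np->pci_dev->irq afterwards. */
static int nv_request_irq_sketch(struct net_device *dev, irq_handler_t handler)
{
	struct fe_priv *np = netdev_priv(dev);

	if ((np->msi_flags & NV_MSI_CAPABLE) && pci_enable_msi(np->pci_dev) == 0) {
		np->msi_flags |= NV_MSI_ENABLED;
		dev->irq = np->pci_dev->irq;	/* MSI changed the vector */
	}
	/* Use the PCI core's view of the vector, never a stale cached copy */
	return request_irq(np->pci_dev->irq, handler, IRQF_SHARED, dev->name, dev);
}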
diff --git a/drivers/net/fs_enet/fs_enet-main.c b/drivers/net/fs_enet/fs_enet-main.c
index 04c6faec88d..f2a4d399a6e 100644
--- a/drivers/net/fs_enet/fs_enet-main.c
+++ b/drivers/net/fs_enet/fs_enet-main.c
@@ -88,7 +88,7 @@ static void skb_align(struct sk_buff *skb, int align)
 static int fs_enet_rx_napi(struct napi_struct *napi, int budget)
 {
 	struct fs_enet_private *fep = container_of(napi, struct fs_enet_private, napi);
-	struct net_device *dev = to_net_dev(fep->dev);
+	struct net_device *dev = fep->ndev;
 	const struct fs_platform_info *fpi = fep->fpi;
 	cbd_t __iomem *bdp;
 	struct sk_buff *skb, *skbn, *skbt;
@@ -217,7 +217,7 @@ static int fs_enet_rx_napi(struct napi_struct *napi, int budget)
 
 	fep->cur_rx = bdp;
 
-	if (received >= budget) {
+	if (received < budget) {
 		/* done */
 		netif_rx_complete(dev, napi);
 		(*fep->ops->napi_enable_rx)(dev);
@@ -807,20 +807,23 @@ static int fs_enet_open(struct net_device *dev)
 	int r;
 	int err;
 
-	napi_enable(&fep->napi);
+	if (fep->fpi->use_napi)
+		napi_enable(&fep->napi);
 
 	/* Install our interrupt handler. */
 	r = fs_request_irq(dev, fep->interrupt, "fs_enet-mac", fs_enet_interrupt);
 	if (r != 0) {
 		printk(KERN_ERR DRV_MODULE_NAME
 		       ": %s Could not allocate FS_ENET IRQ!", dev->name);
-		napi_disable(&fep->napi);
+		if (fep->fpi->use_napi)
+			napi_disable(&fep->napi);
 		return -EINVAL;
 	}
 
 	err = fs_init_phy(dev);
-	if(err) {
-		napi_disable(&fep->napi);
+	if (err) {
+		if (fep->fpi->use_napi)
+			napi_disable(&fep->napi);
 		return err;
 	}
 	phy_start(fep->phydev);
@@ -1232,7 +1235,7 @@ static int __devinit fs_enet_probe(struct of_device *ofdev,
 	fpi->rx_ring = 32;
 	fpi->tx_ring = 32;
 	fpi->rx_copybreak = 240;
-	fpi->use_napi = 0;
+	fpi->use_napi = 1;
 	fpi->napi_weight = 17;
 
 	ret = find_phy(ofdev->node, fpi);
@@ -1249,11 +1252,11 @@ static int __devinit fs_enet_probe(struct of_device *ofdev,
 		goto out_free_fpi;
 	}
 
-	SET_MODULE_OWNER(ndev);
 	dev_set_drvdata(&ofdev->dev, ndev);
 
 	fep = netdev_priv(ndev);
 	fep->dev = &ofdev->dev;
+	fep->ndev = ndev;
 	fep->fpi = fpi;
 	fep->ops = match->data;
 
@@ -1288,10 +1291,11 @@ static int __devinit fs_enet_probe(struct of_device *ofdev,
 	ndev->stop = fs_enet_close;
 	ndev->get_stats = fs_enet_get_stats;
 	ndev->set_multicast_list = fs_set_multicast_list;
-	if (fpi->use_napi) {
-		ndev->poll = fs_enet_rx_napi;
-		ndev->weight = fpi->napi_weight;
-	}
+
+	if (fpi->use_napi)
+		netif_napi_add(ndev, &fep->napi, fs_enet_rx_napi,
+			       fpi->napi_weight);
+
 	ndev->ethtool_ops = &fs_ethtool_ops;
 	ndev->do_ioctl = fs_ioctl;
 
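The change from received >= budget to received < budget in fs_enet_rx_napi() is the interesting fix here: a NAPI poll routine may only complete (netif_rx_complete()) and re-enable receive interrupts when it consumed less than its budget. A hedged sketch of that contract with the 2.6.24-era API, using the fs_enet structures from this patch (process_rx_ring() is a stand-in for the driver's descriptor loop):

/* Sketch of the poll contract the hunk above restores (illustrative only). */
static int example_rx_poll(struct napi_struct *napi, int budget)
{
	struct fs_enet_private *fep =
		container_of(napi, struct fs_enet_private, napi);
	struct net_device *dev = fep->ndev;
	int received = process_rx_ring(dev, budget);	/* stand-in helper */

	if (received < budget) {
		/* Ring drained: leave polled mode and unmask RX interrupts */
		netif_rx_complete(dev, napi);
		(*fep->ops->napi_enable_rx)(dev);
	}
	/* If received == budget, stay scheduled; the core polls again */
	return received;
}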
diff --git a/drivers/net/fs_enet/fs_enet.h b/drivers/net/fs_enet/fs_enet.h
index baf6477165a..c675e29aadc 100644
--- a/drivers/net/fs_enet/fs_enet.h
+++ b/drivers/net/fs_enet/fs_enet.h
@@ -75,6 +75,7 @@ struct phy_info {
 struct fs_enet_private {
 	struct napi_struct napi;
 	struct device *dev;	/* pointer back to the device (must be initialized first) */
+	struct net_device *ndev;
 	spinlock_t lock;	/* during all ops except TX pckt processing */
 	spinlock_t tx_lock;	/* during fs_start_xmit and fs_tx */
 	struct fs_platform_info *fpi;
diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c
index cc288d8f6a5..38268d7335a 100644
--- a/drivers/net/gianfar.c
+++ b/drivers/net/gianfar.c
@@ -956,10 +956,12 @@ static int gfar_enet_open(struct net_device *dev)
 	}
 
 	err = startup_gfar(dev);
-	if (err)
+	if (err) {
 #ifdef CONFIG_GFAR_NAPI
 		napi_disable(&priv->napi);
 #endif
+		return err;
+	}
 
 	netif_start_queue(dev);
 
diff --git a/drivers/net/gianfar.h b/drivers/net/gianfar.h
index c16cc8b946a..46cd7735e6f 100644
--- a/drivers/net/gianfar.h
+++ b/drivers/net/gianfar.h
@@ -749,7 +749,6 @@ struct gfar_private {
 	uint32_t msg_enable;
 
 	/* Network Statistics */
-	struct net_device_stats stats;
 	struct gfar_extra_stats extra_stats;
 };
 
diff --git a/drivers/net/ibm_newemac/mal.c b/drivers/net/ibm_newemac/mal.c
index a680eb05ba6..9a88f71db00 100644
--- a/drivers/net/ibm_newemac/mal.c
+++ b/drivers/net/ibm_newemac/mal.c
@@ -322,7 +322,7 @@ void mal_poll_disable(struct mal_instance *mal, struct mal_commac *commac)
 		msleep(1);
 
 	/* Synchronize with the MAL NAPI poller */
-	__napi_synchronize(&mal->napi);
+	napi_synchronize(&mal->napi);
 }
 
 void mal_poll_enable(struct mal_instance *mal, struct mal_commac *commac)
diff --git a/drivers/net/pcnet32.c b/drivers/net/pcnet32.c
index 5f994b5beda..ff92aca0a7b 100644
--- a/drivers/net/pcnet32.c
+++ b/drivers/net/pcnet32.c
@@ -282,7 +282,6 @@ struct pcnet32_private {
 
 	struct net_device	*dev;
 	struct napi_struct	napi;
-	struct net_device_stats	stats;
 	char			tx_full;
 	char			phycount;	/* number of phys found */
 	int			options;
@@ -442,7 +441,9 @@ static struct pcnet32_access pcnet32_dwio = {
 
 static void pcnet32_netif_stop(struct net_device *dev)
 {
+#ifdef CONFIG_PCNET32_NAPI
 	struct pcnet32_private *lp = netdev_priv(dev);
+#endif
 	dev->trans_start = jiffies;
 #ifdef CONFIG_PCNET32_NAPI
 	napi_disable(&lp->napi);
@@ -452,7 +453,9 @@ static void pcnet32_netif_stop(struct net_device *dev)
 
 static void pcnet32_netif_start(struct net_device *dev)
 {
+#ifdef CONFIG_PCNET32_NAPI
 	struct pcnet32_private *lp = netdev_priv(dev);
+#endif
 	netif_wake_queue(dev);
 #ifdef CONFIG_PCNET32_NAPI
 	napi_enable(&lp->napi);
@@ -1178,15 +1181,15 @@ static void pcnet32_rx_entry(struct net_device *dev,
 		 * buffers, with only the last correctly noting the error.
 		 */
 		if (status & 0x01)	/* Only count a general error at the */
-			lp->stats.rx_errors++;	/* end of a packet. */
+			dev->stats.rx_errors++;	/* end of a packet. */
 		if (status & 0x20)
-			lp->stats.rx_frame_errors++;
+			dev->stats.rx_frame_errors++;
 		if (status & 0x10)
-			lp->stats.rx_over_errors++;
+			dev->stats.rx_over_errors++;
 		if (status & 0x08)
-			lp->stats.rx_crc_errors++;
+			dev->stats.rx_crc_errors++;
 		if (status & 0x04)
-			lp->stats.rx_fifo_errors++;
+			dev->stats.rx_fifo_errors++;
 		return;
 	}
 
@@ -1197,13 +1200,13 @@ static void pcnet32_rx_entry(struct net_device *dev,
 		if (netif_msg_drv(lp))
 			printk(KERN_ERR "%s: Impossible packet size %d!\n",
 			       dev->name, pkt_len);
-		lp->stats.rx_errors++;
+		dev->stats.rx_errors++;
 		return;
 	}
 	if (pkt_len < 60) {
 		if (netif_msg_rx_err(lp))
 			printk(KERN_ERR "%s: Runt packet!\n", dev->name);
-		lp->stats.rx_errors++;
+		dev->stats.rx_errors++;
 		return;
 	}
 
@@ -1237,7 +1240,7 @@ static void pcnet32_rx_entry(struct net_device *dev,
 				printk(KERN_ERR
 				       "%s: Memory squeeze, dropping packet.\n",
 				       dev->name);
-			lp->stats.rx_dropped++;
+			dev->stats.rx_dropped++;
 			return;
 		}
 		skb->dev = dev;
@@ -1256,7 +1259,7 @@ static void pcnet32_rx_entry(struct net_device *dev,
 					       pkt_len,
 					       PCI_DMA_FROMDEVICE);
 	}
-	lp->stats.rx_bytes += skb->len;
+	dev->stats.rx_bytes += skb->len;
 	skb->protocol = eth_type_trans(skb, dev);
 #ifdef CONFIG_PCNET32_NAPI
 	netif_receive_skb(skb);
@@ -1264,7 +1267,7 @@ static void pcnet32_rx_entry(struct net_device *dev,
 	netif_rx(skb);
 #endif
 	dev->last_rx = jiffies;
-	lp->stats.rx_packets++;
+	dev->stats.rx_packets++;
 	return;
 }
 
@@ -1312,21 +1315,21 @@ static int pcnet32_tx(struct net_device *dev)
 		if (status & 0x4000) {
 			/* There was a major error, log it. */
 			int err_status = le32_to_cpu(lp->tx_ring[entry].misc);
-			lp->stats.tx_errors++;
+			dev->stats.tx_errors++;
 			if (netif_msg_tx_err(lp))
 				printk(KERN_ERR
 				       "%s: Tx error status=%04x err_status=%08x\n",
 				       dev->name, status,
 				       err_status);
 			if (err_status & 0x04000000)
-				lp->stats.tx_aborted_errors++;
+				dev->stats.tx_aborted_errors++;
 			if (err_status & 0x08000000)
-				lp->stats.tx_carrier_errors++;
+				dev->stats.tx_carrier_errors++;
 			if (err_status & 0x10000000)
-				lp->stats.tx_window_errors++;
+				dev->stats.tx_window_errors++;
 #ifndef DO_DXSUFLO
 			if (err_status & 0x40000000) {
-				lp->stats.tx_fifo_errors++;
+				dev->stats.tx_fifo_errors++;
 				/* Ackk! On FIFO errors the Tx unit is turned off! */
 				/* Remove this verbosity later! */
 				if (netif_msg_tx_err(lp))
@@ -1337,7 +1340,7 @@ static int pcnet32_tx(struct net_device *dev)
 			}
 #else
 			if (err_status & 0x40000000) {
-				lp->stats.tx_fifo_errors++;
+				dev->stats.tx_fifo_errors++;
 				if (!lp->dxsuflo) {	/* If controller doesn't recover ... */
 					/* Ackk! On FIFO errors the Tx unit is turned off! */
 					/* Remove this verbosity later! */
@@ -1351,8 +1354,8 @@ static int pcnet32_tx(struct net_device *dev)
 #endif
 		} else {
 			if (status & 0x1800)
-				lp->stats.collisions++;
-			lp->stats.tx_packets++;
+				dev->stats.collisions++;
+			dev->stats.tx_packets++;
 		}
 
 		/* We must free the original skb */
@@ -1849,6 +1852,9 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
 	lp->mii_if.mdio_read = mdio_read;
 	lp->mii_if.mdio_write = mdio_write;
 
+	/* napi.weight is used in both the napi and non-napi cases */
+	lp->napi.weight = lp->rx_ring_size / 2;
+
 #ifdef CONFIG_PCNET32_NAPI
 	netif_napi_add(dev, &lp->napi, pcnet32_poll, lp->rx_ring_size / 2);
 #endif
@@ -2471,7 +2477,7 @@ static void pcnet32_tx_timeout(struct net_device *dev)
2471 "%s: transmit timed out, status %4.4x, resetting.\n", 2477 "%s: transmit timed out, status %4.4x, resetting.\n",
2472 dev->name, lp->a.read_csr(ioaddr, CSR0)); 2478 dev->name, lp->a.read_csr(ioaddr, CSR0));
2473 lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); 2479 lp->a.write_csr(ioaddr, CSR0, CSR0_STOP);
2474 lp->stats.tx_errors++; 2480 dev->stats.tx_errors++;
2475 if (netif_msg_tx_err(lp)) { 2481 if (netif_msg_tx_err(lp)) {
2476 int i; 2482 int i;
2477 printk(KERN_DEBUG 2483 printk(KERN_DEBUG
@@ -2541,7 +2547,7 @@ static int pcnet32_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	lp->tx_ring[entry].status = cpu_to_le16(status);
 
 	lp->cur_tx++;
-	lp->stats.tx_bytes += skb->len;
+	dev->stats.tx_bytes += skb->len;
 
 	/* Trigger an immediate send poll. */
 	lp->a.write_csr(ioaddr, CSR0, CSR0_INTEN | CSR0_TXPOLL);
@@ -2586,7 +2592,7 @@ pcnet32_interrupt(int irq, void *dev_id)
 
 		/* Log misc errors. */
 		if (csr0 & 0x4000)
-			lp->stats.tx_errors++;	/* Tx babble. */
+			dev->stats.tx_errors++;	/* Tx babble. */
 		if (csr0 & 0x1000) {
 			/*
 			 * This happens when our receive ring is full. This
@@ -2599,7 +2605,7 @@ pcnet32_interrupt(int irq, void *dev_id)
 			 * don't get a rx interrupt, but a missed frame
 			 * interrupt sooner or later.
 			 */
-			lp->stats.rx_errors++;	/* Missed a Rx frame. */
+			dev->stats.rx_errors++;	/* Missed a Rx frame. */
 		}
 		if (csr0 & 0x0800) {
 			if (netif_msg_drv(lp))
@@ -2661,7 +2667,7 @@ static int pcnet32_close(struct net_device *dev)
 
 	spin_lock_irqsave(&lp->lock, flags);
 
-	lp->stats.rx_missed_errors = lp->a.read_csr(ioaddr, 112);
+	dev->stats.rx_missed_errors = lp->a.read_csr(ioaddr, 112);
 
 	if (netif_msg_ifdown(lp))
 		printk(KERN_DEBUG
@@ -2698,10 +2704,10 @@ static struct net_device_stats *pcnet32_get_stats(struct net_device *dev)
 	unsigned long flags;
 
 	spin_lock_irqsave(&lp->lock, flags);
-	lp->stats.rx_missed_errors = lp->a.read_csr(ioaddr, 112);
+	dev->stats.rx_missed_errors = lp->a.read_csr(ioaddr, 112);
 	spin_unlock_irqrestore(&lp->lock, flags);
 
-	return &lp->stats;
+	return &dev->stats;
 }
 
 /* taken from the sunlance driver, which it took from the depca driver */
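All of the pcnet32 lp->stats accesses above become dev->stats: the private struct net_device_stats copy goes away and the counters live in the stats member that struct net_device already embeds, so get_stats() simply returns &dev->stats. A minimal sketch of the resulting accessor, assuming ioaddr comes from dev->base_addr as elsewhere in the driver (illustrative, mirrors pcnet32_get_stats above):

#include <linux/netdevice.h>
#include <linux/spinlock.h>

/* Sketch: no private stats copy; refresh the one hardware-held counter
 * (CSR 112, the missed-frame count per the assignments above) and hand
 * back the embedded structure. */
static struct net_device_stats *example_get_stats(struct net_device *dev)
{
	struct pcnet32_private *lp = netdev_priv(dev);
	unsigned long ioaddr = dev->base_addr;
	unsigned long flags;

	spin_lock_irqsave(&lp->lock, flags);
	dev->stats.rx_missed_errors = lp->a.read_csr(ioaddr, 112);
	spin_unlock_irqrestore(&lp->lock, flags);

	return &dev->stats;
}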
diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c
index 70d7e478a3e..24cfb6275d9 100644
--- a/drivers/net/sky2.c
+++ b/drivers/net/sky2.c
@@ -1384,13 +1384,9 @@ static int sky2_up(struct net_device *dev)
 	sky2_prefetch_init(hw, txqaddr[port], sky2->tx_le_map,
 			   TX_RING_SIZE - 1);
 
-	napi_enable(&hw->napi);
-
 	err = sky2_rx_start(sky2);
-	if (err) {
-		napi_disable(&hw->napi);
+	if (err)
 		goto err_out;
-	}
 
 	/* Enable interrupts from phy/mac for port */
 	imask = sky2_read32(hw, B0_IMSK);
@@ -1679,13 +1675,13 @@ static int sky2_down(struct net_device *dev)
 	/* Stop more packets from being queued */
 	netif_stop_queue(dev);
 
-	napi_disable(&hw->napi);
-
 	/* Disable port IRQ */
 	imask = sky2_read32(hw, B0_IMSK);
 	imask &= ~portirq_msk[port];
 	sky2_write32(hw, B0_IMSK, imask);
 
+	synchronize_irq(hw->pdev->irq);
+
 	sky2_gmac_reset(hw, port);
 
 	/* Stop transmitter */
@@ -1699,6 +1695,9 @@ static int sky2_down(struct net_device *dev)
 	ctrl &= ~(GM_GPCR_TX_ENA | GM_GPCR_RX_ENA);
 	gma_write16(hw, port, GM_GP_CTRL, ctrl);
 
+	/* Make sure no packets are pending */
+	napi_synchronize(&hw->napi);
+
 	sky2_write8(hw, SK_REG(port, GPHY_CTRL), GPC_RST_SET);
 
 	/* Workaround shared GMAC reset */
@@ -1736,8 +1735,6 @@ static int sky2_down(struct net_device *dev)
 	/* turn off LED's */
 	sky2_write16(hw, B0_Y2LED, LED_STAT_OFF);
 
-	synchronize_irq(hw->pdev->irq);
-
 	sky2_tx_clean(dev);
 	sky2_rx_clean(sky2);
 
@@ -2048,9 +2045,6 @@ static int sky2_change_mtu(struct net_device *dev, int new_mtu)
 	err = sky2_rx_start(sky2);
 	sky2_write32(hw, B0_IMSK, imask);
 
-	/* Unconditionally re-enable NAPI because even if we
-	 * call dev_close() that will do a napi_disable().
-	 */
 	napi_enable(&hw->napi);
 
 	if (err)
@@ -2915,6 +2909,7 @@ static void sky2_restart(struct work_struct *work)
 	rtnl_lock();
 	sky2_write32(hw, B0_IMSK, 0);
 	sky2_read32(hw, B0_IMSK);
+	napi_disable(&hw->napi);
 
 	for (i = 0; i < hw->ports; i++) {
 		dev = hw->dev[i];
@@ -2924,6 +2919,7 @@ static void sky2_restart(struct work_struct *work)
 
 	sky2_reset(hw);
 	sky2_write32(hw, B0_IMSK, Y2_IS_BASE);
+	napi_enable(&hw->napi);
 
 	for (i = 0; i < hw->ports; i++) {
 		dev = hw->dev[i];
@@ -4191,7 +4187,6 @@ static int __devinit sky2_probe(struct pci_dev *pdev,
 		err = -ENOMEM;
 		goto err_out_free_pci;
 	}
-	netif_napi_add(dev, &hw->napi, sky2_poll, NAPI_WEIGHT);
 
 	if (!disable_msi && pci_enable_msi(pdev) == 0) {
 		err = sky2_test_msi(hw);
@@ -4207,6 +4202,8 @@ static int __devinit sky2_probe(struct pci_dev *pdev,
 			goto err_out_free_netdev;
 	}
 
+	netif_napi_add(dev, &hw->napi, sky2_poll, NAPI_WEIGHT);
+
 	err = request_irq(pdev->irq, sky2_intr,
 			  (hw->flags & SKY2_HW_USE_MSI) ? 0 : IRQF_SHARED,
 			  dev->name, hw);
@@ -4215,6 +4212,7 @@ static int __devinit sky2_probe(struct pci_dev *pdev,
 		goto err_out_unregister;
 	}
 	sky2_write32(hw, B0_IMSK, Y2_IS_BASE);
+	napi_enable(&hw->napi);
 
 	sky2_show_addr(dev);
 
@@ -4265,23 +4263,18 @@ err_out:
 static void __devexit sky2_remove(struct pci_dev *pdev)
 {
 	struct sky2_hw *hw = pci_get_drvdata(pdev);
-	struct net_device *dev0, *dev1;
+	int i;
 
 	if (!hw)
 		return;
 
 	del_timer_sync(&hw->watchdog_timer);
+	cancel_work_sync(&hw->restart_work);
 
-	flush_scheduled_work();
+	for (i = hw->ports-1; i >= 0; --i)
+		unregister_netdev(hw->dev[i]);
 
 	sky2_write32(hw, B0_IMSK, 0);
-	synchronize_irq(hw->pdev->irq);
-
-	dev0 = hw->dev[0];
-	dev1 = hw->dev[1];
-	if (dev1)
-		unregister_netdev(dev1);
-	unregister_netdev(dev0);
 
 	sky2_power_aux(hw);
 
@@ -4296,9 +4289,9 @@ static void __devexit sky2_remove(struct pci_dev *pdev)
 	pci_release_regions(pdev);
 	pci_disable_device(pdev);
 
-	if (dev1)
-		free_netdev(dev1);
-	free_netdev(dev0);
+	for (i = hw->ports-1; i >= 0; --i)
+		free_netdev(hw->dev[i]);
+
 	iounmap(hw->regs);
 	kfree(hw);
 
@@ -4328,6 +4321,7 @@ static int sky2_suspend(struct pci_dev *pdev, pm_message_t state)
 	}
 
 	sky2_write32(hw, B0_IMSK, 0);
+	napi_disable(&hw->napi);
 	sky2_power_aux(hw);
 
 	pci_save_state(pdev);
@@ -4362,8 +4356,8 @@ static int sky2_resume(struct pci_dev *pdev)
 	pci_write_config_dword(pdev, PCI_DEV_REG3, 0);
 
 	sky2_reset(hw);
-
 	sky2_write32(hw, B0_IMSK, Y2_IS_BASE);
+	napi_enable(&hw->napi);
 
 	for (i = 0; i < hw->ports; i++) {
 		struct net_device *dev = hw->dev[i];
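Taken together, the sky2 hunks tie napi_enable()/napi_disable() to the lifetime of the single shared hw->napi context (probe, remove, restart, suspend/resume) instead of the per-port up/down paths, and sky2_down() now masks the port, waits for the IRQ handler, and then uses the new napi_synchronize() before touching the MAC. A condensed sketch of that teardown ordering, assuming the register names from the hunks above (not a verbatim copy of sky2_down()):

/* Sketch: quiesce one port without disabling the shared NAPI context. */
static void example_port_quiesce(struct sky2_hw *hw, unsigned int port)
{
	u32 imask = sky2_read32(hw, B0_IMSK);

	imask &= ~portirq_msk[port];	/* stop new events from this port */
	sky2_write32(hw, B0_IMSK, imask);
	synchronize_irq(hw->pdev->irq);	/* handler can no longer schedule us */

	/* Make sure no poll for this port is still in flight */
	napi_synchronize(&hw->napi);
}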
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 452c88d971a..6f85db3535e 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -407,6 +407,24 @@ static inline void napi_enable(struct napi_struct *n)
 	clear_bit(NAPI_STATE_SCHED, &n->state);
 }
 
+#ifdef CONFIG_SMP
+/**
+ *	napi_synchronize - wait until NAPI is not running
+ *	@n: napi context
+ *
+ *	Wait until NAPI is done being scheduled on this context.
+ *	Waits till any outstanding processing completes but
+ *	does not disable future activations.
+ */
+static inline void napi_synchronize(const struct napi_struct *n)
+{
+	while (test_bit(NAPI_STATE_SCHED, &n->state))
+		msleep(1);
+}
+#else
+# define napi_synchronize(n)	barrier()
+#endif
+
 /*
  * The DEVICE structure.
  * Actually, this whole structure is a big mistake.  It mixes I/O
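The helper added above only waits: unlike napi_disable() it does not latch NAPI_STATE_SCHED to keep the poller from being scheduled again, so callers such as mal_poll_disable() pair it with their own gate that the poll routine checks. A self-contained sketch of that pairing, with hypothetical names (example_priv, EXAMPLE_POLL_DISABLED):

#include <linux/bitops.h>
#include <linux/netdevice.h>

struct example_priv {
	struct napi_struct napi;
	unsigned long flags;
};

#define EXAMPLE_POLL_DISABLED	0	/* hypothetical flag bit */

/* Sketch: set a flag the poll handler honours, then wait out any poll that
 * is already running; future scheduling is stopped by the flag, not by
 * napi_synchronize() itself. */
static void example_quiesce_rx(struct example_priv *priv)
{
	set_bit(EXAMPLE_POLL_DISABLED, &priv->flags);
	napi_synchronize(&priv->napi);
}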