aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/net
diff options
context:
space:
mode:
Diffstat (limited to 'drivers/net')
-rw-r--r--drivers/net/3c501.c17
-rw-r--r--drivers/net/3c505.c18
-rw-r--r--drivers/net/3c507.c15
-rw-r--r--drivers/net/3c509.c25
-rw-r--r--drivers/net/3c515.c21
-rw-r--r--drivers/net/3c523.c25
-rw-r--r--drivers/net/3c527.c19
-rw-r--r--drivers/net/3c59x.c55
-rw-r--r--drivers/net/8139cp.c6
-rw-r--r--drivers/net/8139too.c6
-rw-r--r--drivers/net/82596.c17
-rw-r--r--drivers/net/Kconfig3
-rw-r--r--drivers/net/amd8111e.c6
-rw-r--r--drivers/net/arcnet/arc-rawmode.c2
-rw-r--r--drivers/net/arcnet/arcnet.c71
-rw-r--r--drivers/net/arcnet/capmode.c2
-rw-r--r--drivers/net/arcnet/com20020-isa.c2
-rw-r--r--drivers/net/arcnet/com20020-pci.c3
-rw-r--r--drivers/net/arcnet/com20020.c11
-rw-r--r--drivers/net/arcnet/rfc1051.c12
-rw-r--r--drivers/net/arcnet/rfc1201.c47
-rw-r--r--drivers/net/arm/ep93xx_eth.c8
-rw-r--r--drivers/net/arm/ixp4xx_eth.c12
-rw-r--r--drivers/net/atl1e/atl1e_main.c6
-rw-r--r--drivers/net/au1000_eth.c1043
-rw-r--r--drivers/net/b44.c6
-rw-r--r--drivers/net/bnx2.c22
-rw-r--r--drivers/net/bnx2x_main.c6
-rw-r--r--drivers/net/bonding/bond_main.c2
-rw-r--r--drivers/net/cassini.c8
-rw-r--r--drivers/net/chelsio/sge.c4
-rw-r--r--drivers/net/cpmac.c10
-rw-r--r--drivers/net/cxgb3/adapter.h14
-rw-r--r--drivers/net/cxgb3/cxgb3_main.c65
-rw-r--r--drivers/net/cxgb3/sge.c119
-rw-r--r--drivers/net/e100.c6
-rw-r--r--drivers/net/e1000/e1000.h2
-rw-r--r--drivers/net/e1000/e1000_main.c39
-rw-r--r--drivers/net/e1000e/e1000.h2
-rw-r--r--drivers/net/e1000e/netdev.c52
-rw-r--r--drivers/net/ehea/ehea.h2
-rw-r--r--drivers/net/ehea/ehea_main.c128
-rw-r--r--drivers/net/ehea/ehea_qmr.c4
-rw-r--r--drivers/net/enic/enic_main.c12
-rw-r--r--drivers/net/epic100.c6
-rw-r--r--drivers/net/forcedeth.c10
-rw-r--r--drivers/net/fs_enet/fs_enet-main.c4
-rw-r--r--drivers/net/gianfar.c6
-rw-r--r--drivers/net/hamradio/6pack.c16
-rw-r--r--drivers/net/hamradio/baycom_epp.c43
-rw-r--r--drivers/net/hamradio/bpqether.c38
-rw-r--r--drivers/net/hamradio/dmascc.c54
-rw-r--r--drivers/net/hamradio/hdlcdrv.c45
-rw-r--r--drivers/net/hamradio/mkiss.c46
-rw-r--r--drivers/net/hamradio/scc.c21
-rw-r--r--drivers/net/hamradio/yam.c61
-rw-r--r--drivers/net/ibmveth.c8
-rw-r--r--drivers/net/igb/e1000_82575.c7
-rw-r--r--drivers/net/igb/igb.h16
-rw-r--r--drivers/net/igb/igb_ethtool.c17
-rw-r--r--drivers/net/igb/igb_main.c103
-rw-r--r--drivers/net/ixgb/ixgb_main.c6
-rw-r--r--drivers/net/ixgbe/ixgbe.h9
-rw-r--r--drivers/net/ixgbe/ixgbe_ethtool.c11
-rw-r--r--drivers/net/ixgbe/ixgbe_main.c93
-rw-r--r--drivers/net/ixp2000/ixpdev.c4
-rw-r--r--drivers/net/jme.h6
-rw-r--r--drivers/net/korina.c4
-rw-r--r--drivers/net/macb.c10
-rw-r--r--drivers/net/mlx4/en_rx.c4
-rw-r--r--drivers/net/myri10ge/myri10ge.c6
-rw-r--r--drivers/net/natsemi.c6
-rw-r--r--drivers/net/netxen/netxen_nic_main.c2
-rw-r--r--drivers/net/niu.c6
-rw-r--r--drivers/net/pasemi_mac.c6
-rw-r--r--drivers/net/pcnet32.c6
-rw-r--r--drivers/net/phy/mdio-gpio.c13
-rw-r--r--drivers/net/ppp_generic.c275
-rw-r--r--drivers/net/pppoe.c529
-rw-r--r--drivers/net/pppol2tp.c166
-rw-r--r--drivers/net/pppox.c3
-rw-r--r--drivers/net/ps3_gelic_net.c24
-rw-r--r--drivers/net/ps3_gelic_wireless.c28
-rw-r--r--drivers/net/qla3xxx.c6
-rw-r--r--drivers/net/qlge/qlge_main.c6
-rw-r--r--drivers/net/r6040.c4
-rw-r--r--drivers/net/r8169.c6
-rw-r--r--drivers/net/s2io.c8
-rw-r--r--drivers/net/sb1250-mac.c6
-rw-r--r--drivers/net/sc92031.c27
-rw-r--r--drivers/net/sfc/Kconfig1
-rw-r--r--drivers/net/sfc/efx.c15
-rw-r--r--drivers/net/sfc/efx.h2
-rw-r--r--drivers/net/sfc/net_driver.h9
-rw-r--r--drivers/net/sfc/rx.c207
-rw-r--r--drivers/net/sfc/rx.h3
-rw-r--r--drivers/net/sfc/sfe4001.c1
-rw-r--r--drivers/net/sfc/tenxpress.c1
-rw-r--r--drivers/net/skge.c6
-rw-r--r--drivers/net/smc91x.c116
-rw-r--r--drivers/net/smc91x.h10
-rw-r--r--drivers/net/smsc911x.c8
-rw-r--r--drivers/net/smsc9420.c4
-rw-r--r--drivers/net/spider_net.c12
-rw-r--r--drivers/net/starfire.c6
-rw-r--r--drivers/net/sungem.c6
-rw-r--r--drivers/net/tc35815.c6
-rw-r--r--drivers/net/tehuti.c6
-rw-r--r--drivers/net/tg3.c18
-rw-r--r--drivers/net/tokenring/3c359.c17
-rw-r--r--drivers/net/tokenring/abyss.c10
-rw-r--r--drivers/net/tokenring/ibmtr.c43
-rw-r--r--drivers/net/tokenring/lanstreamer.c43
-rw-r--r--drivers/net/tokenring/lanstreamer.h1
-rw-r--r--drivers/net/tokenring/olympic.c38
-rw-r--r--drivers/net/tokenring/olympic.h1
-rw-r--r--drivers/net/tokenring/tms380tr.c21
-rw-r--r--drivers/net/tokenring/tms380tr.h1
-rw-r--r--drivers/net/tokenring/tmspci.c4
-rw-r--r--drivers/net/tsi108_eth.c8
-rw-r--r--drivers/net/tulip/interrupt.c10
-rw-r--r--drivers/net/tun.c371
-rw-r--r--drivers/net/typhoon.c8
-rw-r--r--drivers/net/typhoon.h234
-rw-r--r--drivers/net/ucc_geth.c6
-rw-r--r--drivers/net/usb/smsc95xx.c8
-rw-r--r--drivers/net/via-rhine.c4
-rw-r--r--drivers/net/virtio_net.c55
-rw-r--r--drivers/net/wan/c101.c12
-rw-r--r--drivers/net/wan/cosa.c14
-rw-r--r--drivers/net/wan/dscc4.c18
-rw-r--r--drivers/net/wan/farsync.c18
-rw-r--r--drivers/net/wan/hd64572.c4
-rw-r--r--drivers/net/wan/hdlc.c29
-rw-r--r--drivers/net/wan/hdlc_cisco.c1
-rw-r--r--drivers/net/wan/hdlc_fr.c28
-rw-r--r--drivers/net/wan/hdlc_ppp.c2
-rw-r--r--drivers/net/wan/hdlc_raw.c3
-rw-r--r--drivers/net/wan/hdlc_raw_eth.c8
-rw-r--r--drivers/net/wan/hdlc_x25.c2
-rw-r--r--drivers/net/wan/hostess_sv11.c12
-rw-r--r--drivers/net/wan/ixp4xx_hss.c24
-rw-r--r--drivers/net/wan/lmc/lmc_main.c19
-rw-r--r--drivers/net/wan/lmc/lmc_proto.c17
-rw-r--r--drivers/net/wan/n2.c12
-rw-r--r--drivers/net/wan/pc300too.c12
-rw-r--r--drivers/net/wan/pci200syn.c12
-rw-r--r--drivers/net/wan/sealevel.c12
-rw-r--r--drivers/net/wan/wanxl.c14
-rw-r--r--drivers/net/wimax/i2400m/netdev.c14
-rw-r--r--drivers/net/wireless/ath5k/debug.c2
-rw-r--r--drivers/net/wireless/libertas/debugfs.c14
-rw-r--r--drivers/net/wireless/strip.c2
-rw-r--r--drivers/net/xen-netfront.c8
-rw-r--r--drivers/net/znet.c17
155 files changed, 2706 insertions, 2680 deletions
diff --git a/drivers/net/3c501.c b/drivers/net/3c501.c
index 3d1318a3e688..1c5344aa57cc 100644
--- a/drivers/net/3c501.c
+++ b/drivers/net/3c501.c
@@ -197,6 +197,17 @@ out:
197 return ERR_PTR(err); 197 return ERR_PTR(err);
198} 198}
199 199
200static const struct net_device_ops el_netdev_ops = {
201 .ndo_open = el_open,
202 .ndo_stop = el1_close,
203 .ndo_start_xmit = el_start_xmit,
204 .ndo_tx_timeout = el_timeout,
205 .ndo_set_multicast_list = set_multicast_list,
206 .ndo_change_mtu = eth_change_mtu,
207 .ndo_set_mac_address = eth_mac_addr,
208 .ndo_validate_addr = eth_validate_addr,
209};
210
200/** 211/**
201 * el1_probe1: 212 * el1_probe1:
202 * @dev: The device structure to use 213 * @dev: The device structure to use
@@ -305,12 +316,8 @@ static int __init el1_probe1(struct net_device *dev, int ioaddr)
305 * The EL1-specific entries in the device structure. 316 * The EL1-specific entries in the device structure.
306 */ 317 */
307 318
308 dev->open = &el_open; 319 dev->netdev_ops = &el_netdev_ops;
309 dev->hard_start_xmit = &el_start_xmit;
310 dev->tx_timeout = &el_timeout;
311 dev->watchdog_timeo = HZ; 320 dev->watchdog_timeo = HZ;
312 dev->stop = &el1_close;
313 dev->set_multicast_list = &set_multicast_list;
314 dev->ethtool_ops = &netdev_ethtool_ops; 321 dev->ethtool_ops = &netdev_ethtool_ops;
315 return 0; 322 return 0;
316} 323}
diff --git a/drivers/net/3c505.c b/drivers/net/3c505.c
index 6124605bef05..ea1ad8ce8836 100644
--- a/drivers/net/3c505.c
+++ b/drivers/net/3c505.c
@@ -1348,6 +1348,17 @@ static int __init elp_autodetect(struct net_device *dev)
1348 return 0; /* Because of this, the layer above will return -ENODEV */ 1348 return 0; /* Because of this, the layer above will return -ENODEV */
1349} 1349}
1350 1350
1351static const struct net_device_ops elp_netdev_ops = {
1352 .ndo_open = elp_open,
1353 .ndo_stop = elp_close,
1354 .ndo_get_stats = elp_get_stats,
1355 .ndo_start_xmit = elp_start_xmit,
1356 .ndo_tx_timeout = elp_timeout,
1357 .ndo_set_multicast_list = elp_set_mc_list,
1358 .ndo_change_mtu = eth_change_mtu,
1359 .ndo_set_mac_address = eth_mac_addr,
1360 .ndo_validate_addr = eth_validate_addr,
1361};
1351 1362
1352/****************************************************** 1363/******************************************************
1353 * 1364 *
@@ -1552,13 +1563,8 @@ static int __init elplus_setup(struct net_device *dev)
1552 printk(KERN_ERR "%s: adapter configuration failed\n", dev->name); 1563 printk(KERN_ERR "%s: adapter configuration failed\n", dev->name);
1553 } 1564 }
1554 1565
1555 dev->open = elp_open; /* local */ 1566 dev->netdev_ops = &elp_netdev_ops;
1556 dev->stop = elp_close; /* local */
1557 dev->get_stats = elp_get_stats; /* local */
1558 dev->hard_start_xmit = elp_start_xmit; /* local */
1559 dev->tx_timeout = elp_timeout; /* local */
1560 dev->watchdog_timeo = 10*HZ; 1567 dev->watchdog_timeo = 10*HZ;
1561 dev->set_multicast_list = elp_set_mc_list; /* local */
1562 dev->ethtool_ops = &netdev_ethtool_ops; /* local */ 1568 dev->ethtool_ops = &netdev_ethtool_ops; /* local */
1563 1569
1564 dev->mem_start = dev->mem_end = 0; 1570 dev->mem_start = dev->mem_end = 0;
diff --git a/drivers/net/3c507.c b/drivers/net/3c507.c
index 423e65d0ba73..fbbaf826deff 100644
--- a/drivers/net/3c507.c
+++ b/drivers/net/3c507.c
@@ -352,6 +352,16 @@ out:
352 return ERR_PTR(err); 352 return ERR_PTR(err);
353} 353}
354 354
355static const struct net_device_ops netdev_ops = {
356 .ndo_open = el16_open,
357 .ndo_stop = el16_close,
358 .ndo_start_xmit = el16_send_packet,
359 .ndo_tx_timeout = el16_tx_timeout,
360 .ndo_change_mtu = eth_change_mtu,
361 .ndo_set_mac_address = eth_mac_addr,
362 .ndo_validate_addr = eth_validate_addr,
363};
364
355static int __init el16_probe1(struct net_device *dev, int ioaddr) 365static int __init el16_probe1(struct net_device *dev, int ioaddr)
356{ 366{
357 static unsigned char init_ID_done, version_printed; 367 static unsigned char init_ID_done, version_printed;
@@ -449,10 +459,7 @@ static int __init el16_probe1(struct net_device *dev, int ioaddr)
449 goto out1; 459 goto out1;
450 } 460 }
451 461
452 dev->open = el16_open; 462 dev->netdev_ops = &netdev_ops;
453 dev->stop = el16_close;
454 dev->hard_start_xmit = el16_send_packet;
455 dev->tx_timeout = el16_tx_timeout;
456 dev->watchdog_timeo = TX_TIMEOUT; 463 dev->watchdog_timeo = TX_TIMEOUT;
457 dev->ethtool_ops = &netdev_ethtool_ops; 464 dev->ethtool_ops = &netdev_ethtool_ops;
458 dev->flags &= ~IFF_MULTICAST; /* Multicast doesn't work */ 465 dev->flags &= ~IFF_MULTICAST; /* Multicast doesn't work */
diff --git a/drivers/net/3c509.c b/drivers/net/3c509.c
index 535c234286ea..d58919c7032e 100644
--- a/drivers/net/3c509.c
+++ b/drivers/net/3c509.c
@@ -537,6 +537,21 @@ static struct mca_driver el3_mca_driver = {
537static int mca_registered; 537static int mca_registered;
538#endif /* CONFIG_MCA */ 538#endif /* CONFIG_MCA */
539 539
540static const struct net_device_ops netdev_ops = {
541 .ndo_open = el3_open,
542 .ndo_stop = el3_close,
543 .ndo_start_xmit = el3_start_xmit,
544 .ndo_get_stats = el3_get_stats,
545 .ndo_set_multicast_list = set_multicast_list,
546 .ndo_tx_timeout = el3_tx_timeout,
547 .ndo_change_mtu = eth_change_mtu,
548 .ndo_set_mac_address = eth_mac_addr,
549 .ndo_validate_addr = eth_validate_addr,
550#ifdef CONFIG_NET_POLL_CONTROLLER
551 .ndo_poll_controller = el3_poll_controller,
552#endif
553};
554
540static int __devinit el3_common_init(struct net_device *dev) 555static int __devinit el3_common_init(struct net_device *dev)
541{ 556{
542 struct el3_private *lp = netdev_priv(dev); 557 struct el3_private *lp = netdev_priv(dev);
@@ -553,16 +568,8 @@ static int __devinit el3_common_init(struct net_device *dev)
553 } 568 }
554 569
555 /* The EL3-specific entries in the device structure. */ 570 /* The EL3-specific entries in the device structure. */
556 dev->open = &el3_open; 571 dev->netdev_ops = &netdev_ops;
557 dev->hard_start_xmit = &el3_start_xmit;
558 dev->stop = &el3_close;
559 dev->get_stats = &el3_get_stats;
560 dev->set_multicast_list = &set_multicast_list;
561 dev->tx_timeout = el3_tx_timeout;
562 dev->watchdog_timeo = TX_TIMEOUT; 572 dev->watchdog_timeo = TX_TIMEOUT;
563#ifdef CONFIG_NET_POLL_CONTROLLER
564 dev->poll_controller = el3_poll_controller;
565#endif
566 SET_ETHTOOL_OPS(dev, &ethtool_ops); 573 SET_ETHTOOL_OPS(dev, &ethtool_ops);
567 574
568 err = register_netdev(dev); 575 err = register_netdev(dev);
diff --git a/drivers/net/3c515.c b/drivers/net/3c515.c
index 39ac12233aa7..167bf23066ea 100644
--- a/drivers/net/3c515.c
+++ b/drivers/net/3c515.c
@@ -563,6 +563,20 @@ no_pnp:
563 return NULL; 563 return NULL;
564} 564}
565 565
566
567static const struct net_device_ops netdev_ops = {
568 .ndo_open = corkscrew_open,
569 .ndo_stop = corkscrew_close,
570 .ndo_start_xmit = corkscrew_start_xmit,
571 .ndo_tx_timeout = corkscrew_timeout,
572 .ndo_get_stats = corkscrew_get_stats,
573 .ndo_set_multicast_list = set_rx_mode,
574 .ndo_change_mtu = eth_change_mtu,
575 .ndo_set_mac_address = eth_mac_addr,
576 .ndo_validate_addr = eth_validate_addr,
577};
578
579
566static int corkscrew_setup(struct net_device *dev, int ioaddr, 580static int corkscrew_setup(struct net_device *dev, int ioaddr,
567 struct pnp_dev *idev, int card_number) 581 struct pnp_dev *idev, int card_number)
568{ 582{
@@ -681,13 +695,8 @@ static int corkscrew_setup(struct net_device *dev, int ioaddr,
681 vp->full_bus_master_rx = (vp->capabilities & 0x20) ? 1 : 0; 695 vp->full_bus_master_rx = (vp->capabilities & 0x20) ? 1 : 0;
682 696
683 /* The 3c51x-specific entries in the device structure. */ 697 /* The 3c51x-specific entries in the device structure. */
684 dev->open = &corkscrew_open; 698 dev->netdev_ops = &netdev_ops;
685 dev->hard_start_xmit = &corkscrew_start_xmit;
686 dev->tx_timeout = &corkscrew_timeout;
687 dev->watchdog_timeo = (400 * HZ) / 1000; 699 dev->watchdog_timeo = (400 * HZ) / 1000;
688 dev->stop = &corkscrew_close;
689 dev->get_stats = &corkscrew_get_stats;
690 dev->set_multicast_list = &set_rx_mode;
691 dev->ethtool_ops = &netdev_ethtool_ops; 700 dev->ethtool_ops = &netdev_ethtool_ops;
692 701
693 return register_netdev(dev); 702 return register_netdev(dev);
diff --git a/drivers/net/3c523.c b/drivers/net/3c523.c
index ff41e1ff5603..8f734d74b513 100644
--- a/drivers/net/3c523.c
+++ b/drivers/net/3c523.c
@@ -403,6 +403,20 @@ static int elmc_getinfo(char *buf, int slot, void *d)
403 return len; 403 return len;
404} /* elmc_getinfo() */ 404} /* elmc_getinfo() */
405 405
406static const struct net_device_ops netdev_ops = {
407 .ndo_open = elmc_open,
408 .ndo_stop = elmc_close,
409 .ndo_get_stats = elmc_get_stats,
410 .ndo_start_xmit = elmc_send_packet,
411 .ndo_tx_timeout = elmc_timeout,
412#ifdef ELMC_MULTICAST
413 .ndo_set_multicast_list = set_multicast_list,
414#endif
415 .ndo_change_mtu = eth_change_mtu,
416 .ndo_set_mac_address = eth_mac_addr,
417 .ndo_validate_addr = eth_validate_addr,
418};
419
406/*****************************************************************/ 420/*****************************************************************/
407 421
408static int __init do_elmc_probe(struct net_device *dev) 422static int __init do_elmc_probe(struct net_device *dev)
@@ -544,17 +558,8 @@ static int __init do_elmc_probe(struct net_device *dev)
544 printk(KERN_INFO "%s: hardware address %pM\n", 558 printk(KERN_INFO "%s: hardware address %pM\n",
545 dev->name, dev->dev_addr); 559 dev->name, dev->dev_addr);
546 560
547 dev->open = &elmc_open; 561 dev->netdev_ops = &netdev_ops;
548 dev->stop = &elmc_close;
549 dev->get_stats = &elmc_get_stats;
550 dev->hard_start_xmit = &elmc_send_packet;
551 dev->tx_timeout = &elmc_timeout;
552 dev->watchdog_timeo = HZ; 562 dev->watchdog_timeo = HZ;
553#ifdef ELMC_MULTICAST
554 dev->set_multicast_list = &set_multicast_list;
555#else
556 dev->set_multicast_list = NULL;
557#endif
558 dev->ethtool_ops = &netdev_ethtool_ops; 563 dev->ethtool_ops = &netdev_ethtool_ops;
559 564
560 /* note that we haven't actually requested the IRQ from the kernel. 565 /* note that we haven't actually requested the IRQ from the kernel.
diff --git a/drivers/net/3c527.c b/drivers/net/3c527.c
index 2df3af3b9b20..b61073c42bf8 100644
--- a/drivers/net/3c527.c
+++ b/drivers/net/3c527.c
@@ -288,6 +288,18 @@ struct net_device *__init mc32_probe(int unit)
288 return ERR_PTR(-ENODEV); 288 return ERR_PTR(-ENODEV);
289} 289}
290 290
291static const struct net_device_ops netdev_ops = {
292 .ndo_open = mc32_open,
293 .ndo_stop = mc32_close,
294 .ndo_start_xmit = mc32_send_packet,
295 .ndo_get_stats = mc32_get_stats,
296 .ndo_set_multicast_list = mc32_set_multicast_list,
297 .ndo_tx_timeout = mc32_timeout,
298 .ndo_change_mtu = eth_change_mtu,
299 .ndo_set_mac_address = eth_mac_addr,
300 .ndo_validate_addr = eth_validate_addr,
301};
302
291/** 303/**
292 * mc32_probe1 - Check a given slot for a board and test the card 304 * mc32_probe1 - Check a given slot for a board and test the card
293 * @dev: Device structure to fill in 305 * @dev: Device structure to fill in
@@ -518,12 +530,7 @@ static int __init mc32_probe1(struct net_device *dev, int slot)
518 printk("%s: Firmware Rev %d. %d RX buffers, %d TX buffers. Base of 0x%08X.\n", 530 printk("%s: Firmware Rev %d. %d RX buffers, %d TX buffers. Base of 0x%08X.\n",
519 dev->name, lp->exec_box->data[12], lp->rx_len, lp->tx_len, lp->base); 531 dev->name, lp->exec_box->data[12], lp->rx_len, lp->tx_len, lp->base);
520 532
521 dev->open = mc32_open; 533 dev->netdev_ops = &netdev_ops;
522 dev->stop = mc32_close;
523 dev->hard_start_xmit = mc32_send_packet;
524 dev->get_stats = mc32_get_stats;
525 dev->set_multicast_list = mc32_set_multicast_list;
526 dev->tx_timeout = mc32_timeout;
527 dev->watchdog_timeo = HZ*5; /* Board does all the work */ 534 dev->watchdog_timeo = HZ*5; /* Board does all the work */
528 dev->ethtool_ops = &netdev_ethtool_ops; 535 dev->ethtool_ops = &netdev_ethtool_ops;
529 536
diff --git a/drivers/net/3c59x.c b/drivers/net/3c59x.c
index cdbbb6226fc5..b2563d384cf2 100644
--- a/drivers/net/3c59x.c
+++ b/drivers/net/3c59x.c
@@ -992,6 +992,42 @@ out:
992 return rc; 992 return rc;
993} 993}
994 994
995static const struct net_device_ops boomrang_netdev_ops = {
996 .ndo_open = vortex_open,
997 .ndo_stop = vortex_close,
998 .ndo_start_xmit = boomerang_start_xmit,
999 .ndo_tx_timeout = vortex_tx_timeout,
1000 .ndo_get_stats = vortex_get_stats,
1001#ifdef CONFIG_PCI
1002 .ndo_do_ioctl = vortex_ioctl,
1003#endif
1004 .ndo_set_multicast_list = set_rx_mode,
1005 .ndo_change_mtu = eth_change_mtu,
1006 .ndo_set_mac_address = eth_mac_addr,
1007 .ndo_validate_addr = eth_validate_addr,
1008#ifdef CONFIG_NET_POLL_CONTROLLER
1009 .ndo_poll_controller = poll_vortex,
1010#endif
1011};
1012
1013static const struct net_device_ops vortex_netdev_ops = {
1014 .ndo_open = vortex_open,
1015 .ndo_stop = vortex_close,
1016 .ndo_start_xmit = vortex_start_xmit,
1017 .ndo_tx_timeout = vortex_tx_timeout,
1018 .ndo_get_stats = vortex_get_stats,
1019#ifdef CONFIG_PCI
1020 .ndo_do_ioctl = vortex_ioctl,
1021#endif
1022 .ndo_set_multicast_list = set_rx_mode,
1023 .ndo_change_mtu = eth_change_mtu,
1024 .ndo_set_mac_address = eth_mac_addr,
1025 .ndo_validate_addr = eth_validate_addr,
1026#ifdef CONFIG_NET_POLL_CONTROLLER
1027 .ndo_poll_controller = poll_vortex,
1028#endif
1029};
1030
995/* 1031/*
996 * Start up the PCI/EISA device which is described by *gendev. 1032 * Start up the PCI/EISA device which is described by *gendev.
997 * Return 0 on success. 1033 * Return 0 on success.
@@ -1366,18 +1402,16 @@ static int __devinit vortex_probe1(struct device *gendev,
1366 } 1402 }
1367 1403
1368 /* The 3c59x-specific entries in the device structure. */ 1404 /* The 3c59x-specific entries in the device structure. */
1369 dev->open = vortex_open;
1370 if (vp->full_bus_master_tx) { 1405 if (vp->full_bus_master_tx) {
1371 dev->hard_start_xmit = boomerang_start_xmit; 1406 dev->netdev_ops = &boomrang_netdev_ops;
1372 /* Actually, it still should work with iommu. */ 1407 /* Actually, it still should work with iommu. */
1373 if (card_idx < MAX_UNITS && 1408 if (card_idx < MAX_UNITS &&
1374 ((hw_checksums[card_idx] == -1 && (vp->drv_flags & HAS_HWCKSM)) || 1409 ((hw_checksums[card_idx] == -1 && (vp->drv_flags & HAS_HWCKSM)) ||
1375 hw_checksums[card_idx] == 1)) { 1410 hw_checksums[card_idx] == 1)) {
1376 dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG; 1411 dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
1377 } 1412 }
1378 } else { 1413 } else
1379 dev->hard_start_xmit = vortex_start_xmit; 1414 dev->netdev_ops = &vortex_netdev_ops;
1380 }
1381 1415
1382 if (print_info) { 1416 if (print_info) {
1383 printk(KERN_INFO "%s: scatter/gather %sabled. h/w checksums %sabled\n", 1417 printk(KERN_INFO "%s: scatter/gather %sabled. h/w checksums %sabled\n",
@@ -1386,18 +1420,9 @@ static int __devinit vortex_probe1(struct device *gendev,
1386 (dev->features & NETIF_F_IP_CSUM) ? "en":"dis"); 1420 (dev->features & NETIF_F_IP_CSUM) ? "en":"dis");
1387 } 1421 }
1388 1422
1389 dev->stop = vortex_close;
1390 dev->get_stats = vortex_get_stats;
1391#ifdef CONFIG_PCI
1392 dev->do_ioctl = vortex_ioctl;
1393#endif
1394 dev->ethtool_ops = &vortex_ethtool_ops; 1423 dev->ethtool_ops = &vortex_ethtool_ops;
1395 dev->set_multicast_list = set_rx_mode;
1396 dev->tx_timeout = vortex_tx_timeout;
1397 dev->watchdog_timeo = (watchdog * HZ) / 1000; 1424 dev->watchdog_timeo = (watchdog * HZ) / 1000;
1398#ifdef CONFIG_NET_POLL_CONTROLLER 1425
1399 dev->poll_controller = poll_vortex;
1400#endif
1401 if (pdev) { 1426 if (pdev) {
1402 vp->pm_state_valid = 1; 1427 vp->pm_state_valid = 1;
1403 pci_save_state(VORTEX_PCI(vp)); 1428 pci_save_state(VORTEX_PCI(vp));
diff --git a/drivers/net/8139cp.c b/drivers/net/8139cp.c
index 4e19ae3ce6be..35517b06ec3f 100644
--- a/drivers/net/8139cp.c
+++ b/drivers/net/8139cp.c
@@ -604,7 +604,7 @@ rx_next:
604 604
605 spin_lock_irqsave(&cp->lock, flags); 605 spin_lock_irqsave(&cp->lock, flags);
606 cpw16_f(IntrMask, cp_intr_mask); 606 cpw16_f(IntrMask, cp_intr_mask);
607 __netif_rx_complete(napi); 607 __napi_complete(napi);
608 spin_unlock_irqrestore(&cp->lock, flags); 608 spin_unlock_irqrestore(&cp->lock, flags);
609 } 609 }
610 610
@@ -641,9 +641,9 @@ static irqreturn_t cp_interrupt (int irq, void *dev_instance)
641 } 641 }
642 642
643 if (status & (RxOK | RxErr | RxEmpty | RxFIFOOvr)) 643 if (status & (RxOK | RxErr | RxEmpty | RxFIFOOvr))
644 if (netif_rx_schedule_prep(&cp->napi)) { 644 if (napi_schedule_prep(&cp->napi)) {
645 cpw16_f(IntrMask, cp_norx_intr_mask); 645 cpw16_f(IntrMask, cp_norx_intr_mask);
646 __netif_rx_schedule(&cp->napi); 646 __napi_schedule(&cp->napi);
647 } 647 }
648 648
649 if (status & (TxOK | TxErr | TxEmpty | SWInt)) 649 if (status & (TxOK | TxErr | TxEmpty | SWInt))
diff --git a/drivers/net/8139too.c b/drivers/net/8139too.c
index a5b24202d564..5341da604e84 100644
--- a/drivers/net/8139too.c
+++ b/drivers/net/8139too.c
@@ -2128,7 +2128,7 @@ static int rtl8139_poll(struct napi_struct *napi, int budget)
2128 */ 2128 */
2129 spin_lock_irqsave(&tp->lock, flags); 2129 spin_lock_irqsave(&tp->lock, flags);
2130 RTL_W16_F(IntrMask, rtl8139_intr_mask); 2130 RTL_W16_F(IntrMask, rtl8139_intr_mask);
2131 __netif_rx_complete(napi); 2131 __napi_complete(napi);
2132 spin_unlock_irqrestore(&tp->lock, flags); 2132 spin_unlock_irqrestore(&tp->lock, flags);
2133 } 2133 }
2134 spin_unlock(&tp->rx_lock); 2134 spin_unlock(&tp->rx_lock);
@@ -2178,9 +2178,9 @@ static irqreturn_t rtl8139_interrupt (int irq, void *dev_instance)
2178 /* Receive packets are processed by poll routine. 2178 /* Receive packets are processed by poll routine.
2179 If not running start it now. */ 2179 If not running start it now. */
2180 if (status & RxAckBits){ 2180 if (status & RxAckBits){
2181 if (netif_rx_schedule_prep(&tp->napi)) { 2181 if (napi_schedule_prep(&tp->napi)) {
2182 RTL_W16_F (IntrMask, rtl8139_norx_intr_mask); 2182 RTL_W16_F (IntrMask, rtl8139_norx_intr_mask);
2183 __netif_rx_schedule(&tp->napi); 2183 __napi_schedule(&tp->napi);
2184 } 2184 }
2185 } 2185 }
2186 2186
diff --git a/drivers/net/82596.c b/drivers/net/82596.c
index b273596368e3..cca94b9c08ae 100644
--- a/drivers/net/82596.c
+++ b/drivers/net/82596.c
@@ -1122,6 +1122,17 @@ static void print_eth(unsigned char *add, char *str)
1122static int io = 0x300; 1122static int io = 0x300;
1123static int irq = 10; 1123static int irq = 10;
1124 1124
1125static const struct net_device_ops i596_netdev_ops = {
1126 .ndo_open = i596_open,
1127 .ndo_stop = i596_close,
1128 .ndo_start_xmit = i596_start_xmit,
1129 .ndo_set_multicast_list = set_multicast_list,
1130 .ndo_tx_timeout = i596_tx_timeout,
1131 .ndo_change_mtu = eth_change_mtu,
1132 .ndo_set_mac_address = eth_mac_addr,
1133 .ndo_validate_addr = eth_validate_addr,
1134};
1135
1125struct net_device * __init i82596_probe(int unit) 1136struct net_device * __init i82596_probe(int unit)
1126{ 1137{
1127 struct net_device *dev; 1138 struct net_device *dev;
@@ -1232,11 +1243,7 @@ found:
1232 DEB(DEB_PROBE,printk(KERN_INFO "%s", version)); 1243 DEB(DEB_PROBE,printk(KERN_INFO "%s", version));
1233 1244
1234 /* The 82596-specific entries in the device structure. */ 1245 /* The 82596-specific entries in the device structure. */
1235 dev->open = i596_open; 1246 dev->netdev_ops = &i596_netdev_ops;
1236 dev->stop = i596_close;
1237 dev->hard_start_xmit = i596_start_xmit;
1238 dev->set_multicast_list = set_multicast_list;
1239 dev->tx_timeout = i596_tx_timeout;
1240 dev->watchdog_timeo = TX_TIMEOUT; 1247 dev->watchdog_timeo = TX_TIMEOUT;
1241 1248
1242 dev->ml_priv = (void *)(dev->mem_start); 1249 dev->ml_priv = (void *)(dev->mem_start);
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 9fe8cb7d43ac..c4776a2adf00 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -2022,7 +2022,6 @@ config IGB
2022config IGB_LRO 2022config IGB_LRO
2023 bool "Use software LRO" 2023 bool "Use software LRO"
2024 depends on IGB && INET 2024 depends on IGB && INET
2025 select INET_LRO
2026 ---help--- 2025 ---help---
2027 Say Y here if you want to use large receive offload. 2026 Say Y here if you want to use large receive offload.
2028 2027
@@ -2408,7 +2407,6 @@ config CHELSIO_T3
2408 tristate "Chelsio Communications T3 10Gb Ethernet support" 2407 tristate "Chelsio Communications T3 10Gb Ethernet support"
2409 depends on CHELSIO_T3_DEPENDS 2408 depends on CHELSIO_T3_DEPENDS
2410 select FW_LOADER 2409 select FW_LOADER
2411 select INET_LRO
2412 help 2410 help
2413 This driver supports Chelsio T3-based gigabit and 10Gb Ethernet 2411 This driver supports Chelsio T3-based gigabit and 10Gb Ethernet
2414 adapters. 2412 adapters.
@@ -2444,7 +2442,6 @@ config ENIC
2444config IXGBE 2442config IXGBE
2445 tristate "Intel(R) 10GbE PCI Express adapters support" 2443 tristate "Intel(R) 10GbE PCI Express adapters support"
2446 depends on PCI && INET 2444 depends on PCI && INET
2447 select INET_LRO
2448 ---help--- 2445 ---help---
2449 This driver supports Intel(R) 10GbE PCI Express family of 2446 This driver supports Intel(R) 10GbE PCI Express family of
2450 adapters. For more information on how to identify your adapter, go 2447 adapters. For more information on how to identify your adapter, go
diff --git a/drivers/net/amd8111e.c b/drivers/net/amd8111e.c
index 7709992bb6bf..cb9c95d3ed0a 100644
--- a/drivers/net/amd8111e.c
+++ b/drivers/net/amd8111e.c
@@ -831,7 +831,7 @@ static int amd8111e_rx_poll(struct napi_struct *napi, int budget)
831 if (rx_pkt_limit > 0) { 831 if (rx_pkt_limit > 0) {
832 /* Receive descriptor is empty now */ 832 /* Receive descriptor is empty now */
833 spin_lock_irqsave(&lp->lock, flags); 833 spin_lock_irqsave(&lp->lock, flags);
834 __netif_rx_complete(napi); 834 __napi_complete(napi);
835 writel(VAL0|RINTEN0, mmio + INTEN0); 835 writel(VAL0|RINTEN0, mmio + INTEN0);
836 writel(VAL2 | RDMD0, mmio + CMD0); 836 writel(VAL2 | RDMD0, mmio + CMD0);
837 spin_unlock_irqrestore(&lp->lock, flags); 837 spin_unlock_irqrestore(&lp->lock, flags);
@@ -1170,11 +1170,11 @@ static irqreturn_t amd8111e_interrupt(int irq, void *dev_id)
1170 1170
1171 /* Check if Receive Interrupt has occurred. */ 1171 /* Check if Receive Interrupt has occurred. */
1172 if (intr0 & RINT0) { 1172 if (intr0 & RINT0) {
1173 if (netif_rx_schedule_prep(&lp->napi)) { 1173 if (napi_schedule_prep(&lp->napi)) {
1174 /* Disable receive interupts */ 1174 /* Disable receive interupts */
1175 writel(RINTEN0, mmio + INTEN0); 1175 writel(RINTEN0, mmio + INTEN0);
1176 /* Schedule a polling routine */ 1176 /* Schedule a polling routine */
1177 __netif_rx_schedule(&lp->napi); 1177 __napi_schedule(&lp->napi);
1178 } else if (intren0 & RINTEN0) { 1178 } else if (intren0 & RINTEN0) {
1179 printk("************Driver bug! \ 1179 printk("************Driver bug! \
1180 interrupt while in poll\n"); 1180 interrupt while in poll\n");
diff --git a/drivers/net/arcnet/arc-rawmode.c b/drivers/net/arcnet/arc-rawmode.c
index 3ff9affb1a91..da017cbb5f64 100644
--- a/drivers/net/arcnet/arc-rawmode.c
+++ b/drivers/net/arcnet/arc-rawmode.c
@@ -102,7 +102,7 @@ static void rx(struct net_device *dev, int bufnum,
102 skb = alloc_skb(length + ARC_HDR_SIZE, GFP_ATOMIC); 102 skb = alloc_skb(length + ARC_HDR_SIZE, GFP_ATOMIC);
103 if (skb == NULL) { 103 if (skb == NULL) {
104 BUGMSG(D_NORMAL, "Memory squeeze, dropping packet.\n"); 104 BUGMSG(D_NORMAL, "Memory squeeze, dropping packet.\n");
105 lp->stats.rx_dropped++; 105 dev->stats.rx_dropped++;
106 return; 106 return;
107 } 107 }
108 skb_put(skb, length + ARC_HDR_SIZE); 108 skb_put(skb, length + ARC_HDR_SIZE);
diff --git a/drivers/net/arcnet/arcnet.c b/drivers/net/arcnet/arcnet.c
index 6b53e5ed125c..a80d4a30a464 100644
--- a/drivers/net/arcnet/arcnet.c
+++ b/drivers/net/arcnet/arcnet.c
@@ -95,17 +95,16 @@ EXPORT_SYMBOL(arcnet_unregister_proto);
95EXPORT_SYMBOL(arcnet_debug); 95EXPORT_SYMBOL(arcnet_debug);
96EXPORT_SYMBOL(alloc_arcdev); 96EXPORT_SYMBOL(alloc_arcdev);
97EXPORT_SYMBOL(arcnet_interrupt); 97EXPORT_SYMBOL(arcnet_interrupt);
98EXPORT_SYMBOL(arcnet_open);
99EXPORT_SYMBOL(arcnet_close);
100EXPORT_SYMBOL(arcnet_send_packet);
101EXPORT_SYMBOL(arcnet_timeout);
98 102
99/* Internal function prototypes */ 103/* Internal function prototypes */
100static int arcnet_open(struct net_device *dev);
101static int arcnet_close(struct net_device *dev);
102static int arcnet_send_packet(struct sk_buff *skb, struct net_device *dev);
103static void arcnet_timeout(struct net_device *dev);
104static int arcnet_header(struct sk_buff *skb, struct net_device *dev, 104static int arcnet_header(struct sk_buff *skb, struct net_device *dev,
105 unsigned short type, const void *daddr, 105 unsigned short type, const void *daddr,
106 const void *saddr, unsigned len); 106 const void *saddr, unsigned len);
107static int arcnet_rebuild_header(struct sk_buff *skb); 107static int arcnet_rebuild_header(struct sk_buff *skb);
108static struct net_device_stats *arcnet_get_stats(struct net_device *dev);
109static int go_tx(struct net_device *dev); 108static int go_tx(struct net_device *dev);
110 109
111static int debug = ARCNET_DEBUG; 110static int debug = ARCNET_DEBUG;
@@ -322,11 +321,18 @@ static const struct header_ops arcnet_header_ops = {
322 .rebuild = arcnet_rebuild_header, 321 .rebuild = arcnet_rebuild_header,
323}; 322};
324 323
324static const struct net_device_ops arcnet_netdev_ops = {
325 .ndo_open = arcnet_open,
326 .ndo_stop = arcnet_close,
327 .ndo_start_xmit = arcnet_send_packet,
328 .ndo_tx_timeout = arcnet_timeout,
329};
325 330
326/* Setup a struct device for ARCnet. */ 331/* Setup a struct device for ARCnet. */
327static void arcdev_setup(struct net_device *dev) 332static void arcdev_setup(struct net_device *dev)
328{ 333{
329 dev->type = ARPHRD_ARCNET; 334 dev->type = ARPHRD_ARCNET;
335 dev->netdev_ops = &arcnet_netdev_ops;
330 dev->header_ops = &arcnet_header_ops; 336 dev->header_ops = &arcnet_header_ops;
331 dev->hard_header_len = sizeof(struct archdr); 337 dev->hard_header_len = sizeof(struct archdr);
332 dev->mtu = choose_mtu(); 338 dev->mtu = choose_mtu();
@@ -339,18 +345,9 @@ static void arcdev_setup(struct net_device *dev)
339 /* New-style flags. */ 345 /* New-style flags. */
340 dev->flags = IFF_BROADCAST; 346 dev->flags = IFF_BROADCAST;
341 347
342 /*
343 * Put in this stuff here, so we don't have to export the symbols to
344 * the chipset drivers.
345 */
346 dev->open = arcnet_open;
347 dev->stop = arcnet_close;
348 dev->hard_start_xmit = arcnet_send_packet;
349 dev->tx_timeout = arcnet_timeout;
350 dev->get_stats = arcnet_get_stats;
351} 348}
352 349
353struct net_device *alloc_arcdev(char *name) 350struct net_device *alloc_arcdev(const char *name)
354{ 351{
355 struct net_device *dev; 352 struct net_device *dev;
356 353
@@ -372,7 +369,7 @@ struct net_device *alloc_arcdev(char *name)
372 * that "should" only need to be set once at boot, so that there is 369 * that "should" only need to be set once at boot, so that there is
373 * non-reboot way to recover if something goes wrong. 370 * non-reboot way to recover if something goes wrong.
374 */ 371 */
375static int arcnet_open(struct net_device *dev) 372int arcnet_open(struct net_device *dev)
376{ 373{
377 struct arcnet_local *lp = netdev_priv(dev); 374 struct arcnet_local *lp = netdev_priv(dev);
378 int count, newmtu, error; 375 int count, newmtu, error;
@@ -472,7 +469,7 @@ static int arcnet_open(struct net_device *dev)
472 469
473 470
474/* The inverse routine to arcnet_open - shuts down the card. */ 471/* The inverse routine to arcnet_open - shuts down the card. */
475static int arcnet_close(struct net_device *dev) 472int arcnet_close(struct net_device *dev)
476{ 473{
477 struct arcnet_local *lp = netdev_priv(dev); 474 struct arcnet_local *lp = netdev_priv(dev);
478 475
@@ -583,8 +580,8 @@ static int arcnet_rebuild_header(struct sk_buff *skb)
583 } else { 580 } else {
584 BUGMSG(D_NORMAL, 581 BUGMSG(D_NORMAL,
585 "I don't understand ethernet protocol %Xh addresses!\n", type); 582 "I don't understand ethernet protocol %Xh addresses!\n", type);
586 lp->stats.tx_errors++; 583 dev->stats.tx_errors++;
587 lp->stats.tx_aborted_errors++; 584 dev->stats.tx_aborted_errors++;
588 } 585 }
589 586
590 /* if we couldn't resolve the address... give up. */ 587 /* if we couldn't resolve the address... give up. */
@@ -601,7 +598,7 @@ static int arcnet_rebuild_header(struct sk_buff *skb)
601 598
602 599
603/* Called by the kernel in order to transmit a packet. */ 600/* Called by the kernel in order to transmit a packet. */
604static int arcnet_send_packet(struct sk_buff *skb, struct net_device *dev) 601int arcnet_send_packet(struct sk_buff *skb, struct net_device *dev)
605{ 602{
606 struct arcnet_local *lp = netdev_priv(dev); 603 struct arcnet_local *lp = netdev_priv(dev);
607 struct archdr *pkt; 604 struct archdr *pkt;
@@ -645,7 +642,7 @@ static int arcnet_send_packet(struct sk_buff *skb, struct net_device *dev)
645 !proto->ack_tx) { 642 !proto->ack_tx) {
646 /* done right away and we don't want to acknowledge 643 /* done right away and we don't want to acknowledge
647 the package later - forget about it now */ 644 the package later - forget about it now */
648 lp->stats.tx_bytes += skb->len; 645 dev->stats.tx_bytes += skb->len;
649 freeskb = 1; 646 freeskb = 1;
650 } else { 647 } else {
651 /* do it the 'split' way */ 648 /* do it the 'split' way */
@@ -709,7 +706,7 @@ static int go_tx(struct net_device *dev)
709 /* start sending */ 706 /* start sending */
710 ACOMMAND(TXcmd | (lp->cur_tx << 3)); 707 ACOMMAND(TXcmd | (lp->cur_tx << 3));
711 708
712 lp->stats.tx_packets++; 709 dev->stats.tx_packets++;
713 lp->lasttrans_dest = lp->lastload_dest; 710 lp->lasttrans_dest = lp->lastload_dest;
714 lp->lastload_dest = 0; 711 lp->lastload_dest = 0;
715 lp->excnak_pending = 0; 712 lp->excnak_pending = 0;
@@ -720,7 +717,7 @@ static int go_tx(struct net_device *dev)
720 717
721 718
722/* Called by the kernel when transmit times out */ 719/* Called by the kernel when transmit times out */
723static void arcnet_timeout(struct net_device *dev) 720void arcnet_timeout(struct net_device *dev)
724{ 721{
725 unsigned long flags; 722 unsigned long flags;
726 struct arcnet_local *lp = netdev_priv(dev); 723 struct arcnet_local *lp = netdev_priv(dev);
@@ -732,11 +729,11 @@ static void arcnet_timeout(struct net_device *dev)
732 msg = " - missed IRQ?"; 729 msg = " - missed IRQ?";
733 } else { 730 } else {
734 msg = ""; 731 msg = "";
735 lp->stats.tx_aborted_errors++; 732 dev->stats.tx_aborted_errors++;
736 lp->timed_out = 1; 733 lp->timed_out = 1;
737 ACOMMAND(NOTXcmd | (lp->cur_tx << 3)); 734 ACOMMAND(NOTXcmd | (lp->cur_tx << 3));
738 } 735 }
739 lp->stats.tx_errors++; 736 dev->stats.tx_errors++;
740 737
741 /* make sure we didn't miss a TX or a EXC NAK IRQ */ 738 /* make sure we didn't miss a TX or a EXC NAK IRQ */
742 AINTMASK(0); 739 AINTMASK(0);
@@ -865,8 +862,8 @@ irqreturn_t arcnet_interrupt(int irq, void *dev_id)
865 "transmit was not acknowledged! " 862 "transmit was not acknowledged! "
866 "(status=%Xh, dest=%02Xh)\n", 863 "(status=%Xh, dest=%02Xh)\n",
867 status, lp->lasttrans_dest); 864 status, lp->lasttrans_dest);
868 lp->stats.tx_errors++; 865 dev->stats.tx_errors++;
869 lp->stats.tx_carrier_errors++; 866 dev->stats.tx_carrier_errors++;
870 } else { 867 } else {
871 BUGMSG(D_DURING, 868 BUGMSG(D_DURING,
872 "broadcast was not acknowledged; that's normal " 869 "broadcast was not acknowledged; that's normal "
@@ -905,7 +902,7 @@ irqreturn_t arcnet_interrupt(int irq, void *dev_id)
905 if (txbuf != -1) { 902 if (txbuf != -1) {
906 if (lp->outgoing.proto->continue_tx(dev, txbuf)) { 903 if (lp->outgoing.proto->continue_tx(dev, txbuf)) {
907 /* that was the last segment */ 904 /* that was the last segment */
908 lp->stats.tx_bytes += lp->outgoing.skb->len; 905 dev->stats.tx_bytes += lp->outgoing.skb->len;
909 if(!lp->outgoing.proto->ack_tx) 906 if(!lp->outgoing.proto->ack_tx)
910 { 907 {
911 dev_kfree_skb_irq(lp->outgoing.skb); 908 dev_kfree_skb_irq(lp->outgoing.skb);
@@ -930,7 +927,7 @@ irqreturn_t arcnet_interrupt(int irq, void *dev_id)
930 } 927 }
931 if (status & lp->intmask & RECONflag) { 928 if (status & lp->intmask & RECONflag) {
932 ACOMMAND(CFLAGScmd | CONFIGclear); 929 ACOMMAND(CFLAGScmd | CONFIGclear);
933 lp->stats.tx_carrier_errors++; 930 dev->stats.tx_carrier_errors++;
934 931
935 BUGMSG(D_RECON, "Network reconfiguration detected (status=%Xh)\n", 932 BUGMSG(D_RECON, "Network reconfiguration detected (status=%Xh)\n",
936 status); 933 status);
@@ -1038,8 +1035,8 @@ static void arcnet_rx(struct net_device *dev, int bufnum)
1038 "(%d+4 bytes)\n", 1035 "(%d+4 bytes)\n",
1039 bufnum, pkt.hard.source, pkt.hard.dest, length); 1036 bufnum, pkt.hard.source, pkt.hard.dest, length);
1040 1037
1041 lp->stats.rx_packets++; 1038 dev->stats.rx_packets++;
1042 lp->stats.rx_bytes += length + ARC_HDR_SIZE; 1039 dev->stats.rx_bytes += length + ARC_HDR_SIZE;
1043 1040
1044 /* call the right receiver for the protocol */ 1041 /* call the right receiver for the protocol */
1045 if (arc_proto_map[soft->proto]->is_ip) { 1042 if (arc_proto_map[soft->proto]->is_ip) {
@@ -1067,18 +1064,6 @@ static void arcnet_rx(struct net_device *dev, int bufnum)
1067} 1064}
1068 1065
1069 1066
1070
1071/*
1072 * Get the current statistics. This may be called with the card open or
1073 * closed.
1074 */
1075static struct net_device_stats *arcnet_get_stats(struct net_device *dev)
1076{
1077 struct arcnet_local *lp = netdev_priv(dev);
1078 return &lp->stats;
1079}
1080
1081
1082static void null_rx(struct net_device *dev, int bufnum, 1067static void null_rx(struct net_device *dev, int bufnum,
1083 struct archdr *pkthdr, int length) 1068 struct archdr *pkthdr, int length)
1084{ 1069{
diff --git a/drivers/net/arcnet/capmode.c b/drivers/net/arcnet/capmode.c
index 30580bbe252d..1613929ff301 100644
--- a/drivers/net/arcnet/capmode.c
+++ b/drivers/net/arcnet/capmode.c
@@ -119,7 +119,7 @@ static void rx(struct net_device *dev, int bufnum,
119 skb = alloc_skb(length + ARC_HDR_SIZE + sizeof(int), GFP_ATOMIC); 119 skb = alloc_skb(length + ARC_HDR_SIZE + sizeof(int), GFP_ATOMIC);
120 if (skb == NULL) { 120 if (skb == NULL) {
121 BUGMSG(D_NORMAL, "Memory squeeze, dropping packet.\n"); 121 BUGMSG(D_NORMAL, "Memory squeeze, dropping packet.\n");
122 lp->stats.rx_dropped++; 122 dev->stats.rx_dropped++;
123 return; 123 return;
124 } 124 }
125 skb_put(skb, length + ARC_HDR_SIZE + sizeof(int)); 125 skb_put(skb, length + ARC_HDR_SIZE + sizeof(int));
diff --git a/drivers/net/arcnet/com20020-isa.c b/drivers/net/arcnet/com20020-isa.c
index ea53a940272f..db08fc24047a 100644
--- a/drivers/net/arcnet/com20020-isa.c
+++ b/drivers/net/arcnet/com20020-isa.c
@@ -151,6 +151,8 @@ static int __init com20020_init(void)
151 if (node && node != 0xff) 151 if (node && node != 0xff)
152 dev->dev_addr[0] = node; 152 dev->dev_addr[0] = node;
153 153
154 dev->netdev_ops = &com20020_netdev_ops;
155
154 lp = netdev_priv(dev); 156 lp = netdev_priv(dev);
155 lp->backplane = backplane; 157 lp->backplane = backplane;
156 lp->clockp = clockp & 7; 158 lp->clockp = clockp & 7;
diff --git a/drivers/net/arcnet/com20020-pci.c b/drivers/net/arcnet/com20020-pci.c
index 8b51f632581d..dbf4de39754d 100644
--- a/drivers/net/arcnet/com20020-pci.c
+++ b/drivers/net/arcnet/com20020-pci.c
@@ -72,6 +72,9 @@ static int __devinit com20020pci_probe(struct pci_dev *pdev, const struct pci_de
72 dev = alloc_arcdev(device); 72 dev = alloc_arcdev(device);
73 if (!dev) 73 if (!dev)
74 return -ENOMEM; 74 return -ENOMEM;
75
76 dev->netdev_ops = &com20020_netdev_ops;
77
75 lp = netdev_priv(dev); 78 lp = netdev_priv(dev);
76 79
77 pci_set_drvdata(pdev, dev); 80 pci_set_drvdata(pdev, dev);
diff --git a/drivers/net/arcnet/com20020.c b/drivers/net/arcnet/com20020.c
index 103688358fb8..651275a5f3d2 100644
--- a/drivers/net/arcnet/com20020.c
+++ b/drivers/net/arcnet/com20020.c
@@ -149,6 +149,14 @@ int com20020_check(struct net_device *dev)
149 return 0; 149 return 0;
150} 150}
151 151
152const struct net_device_ops com20020_netdev_ops = {
153 .ndo_open = arcnet_open,
154 .ndo_stop = arcnet_close,
155 .ndo_start_xmit = arcnet_send_packet,
156 .ndo_tx_timeout = arcnet_timeout,
157 .ndo_set_multicast_list = com20020_set_mc_list,
158};
159
152/* Set up the struct net_device associated with this card. Called after 160/* Set up the struct net_device associated with this card. Called after
153 * probing succeeds. 161 * probing succeeds.
154 */ 162 */
@@ -170,8 +178,6 @@ int com20020_found(struct net_device *dev, int shared)
170 lp->hw.copy_from_card = com20020_copy_from_card; 178 lp->hw.copy_from_card = com20020_copy_from_card;
171 lp->hw.close = com20020_close; 179 lp->hw.close = com20020_close;
172 180
173 dev->set_multicast_list = com20020_set_mc_list;
174
175 if (!dev->dev_addr[0]) 181 if (!dev->dev_addr[0])
176 dev->dev_addr[0] = inb(ioaddr + BUS_ALIGN*8); /* FIXME: do this some other way! */ 182 dev->dev_addr[0] = inb(ioaddr + BUS_ALIGN*8); /* FIXME: do this some other way! */
177 183
@@ -342,6 +348,7 @@ static void com20020_set_mc_list(struct net_device *dev)
342 defined(CONFIG_ARCNET_COM20020_CS_MODULE) 348 defined(CONFIG_ARCNET_COM20020_CS_MODULE)
343EXPORT_SYMBOL(com20020_check); 349EXPORT_SYMBOL(com20020_check);
344EXPORT_SYMBOL(com20020_found); 350EXPORT_SYMBOL(com20020_found);
351EXPORT_SYMBOL(com20020_netdev_ops);
345#endif 352#endif
346 353
347MODULE_LICENSE("GPL"); 354MODULE_LICENSE("GPL");
diff --git a/drivers/net/arcnet/rfc1051.c b/drivers/net/arcnet/rfc1051.c
index 49d39a9cb696..06f8fa2f8f2f 100644
--- a/drivers/net/arcnet/rfc1051.c
+++ b/drivers/net/arcnet/rfc1051.c
@@ -88,7 +88,6 @@ MODULE_LICENSE("GPL");
88 */ 88 */
89static __be16 type_trans(struct sk_buff *skb, struct net_device *dev) 89static __be16 type_trans(struct sk_buff *skb, struct net_device *dev)
90{ 90{
91 struct arcnet_local *lp = netdev_priv(dev);
92 struct archdr *pkt = (struct archdr *) skb->data; 91 struct archdr *pkt = (struct archdr *) skb->data;
93 struct arc_rfc1051 *soft = &pkt->soft.rfc1051; 92 struct arc_rfc1051 *soft = &pkt->soft.rfc1051;
94 int hdr_size = ARC_HDR_SIZE + RFC1051_HDR_SIZE; 93 int hdr_size = ARC_HDR_SIZE + RFC1051_HDR_SIZE;
@@ -112,8 +111,8 @@ static __be16 type_trans(struct sk_buff *skb, struct net_device *dev)
112 return htons(ETH_P_ARP); 111 return htons(ETH_P_ARP);
113 112
114 default: 113 default:
115 lp->stats.rx_errors++; 114 dev->stats.rx_errors++;
116 lp->stats.rx_crc_errors++; 115 dev->stats.rx_crc_errors++;
117 return 0; 116 return 0;
118 } 117 }
119 118
@@ -140,7 +139,7 @@ static void rx(struct net_device *dev, int bufnum,
140 skb = alloc_skb(length + ARC_HDR_SIZE, GFP_ATOMIC); 139 skb = alloc_skb(length + ARC_HDR_SIZE, GFP_ATOMIC);
141 if (skb == NULL) { 140 if (skb == NULL) {
142 BUGMSG(D_NORMAL, "Memory squeeze, dropping packet.\n"); 141 BUGMSG(D_NORMAL, "Memory squeeze, dropping packet.\n");
143 lp->stats.rx_dropped++; 142 dev->stats.rx_dropped++;
144 return; 143 return;
145 } 144 }
146 skb_put(skb, length + ARC_HDR_SIZE); 145 skb_put(skb, length + ARC_HDR_SIZE);
@@ -168,7 +167,6 @@ static void rx(struct net_device *dev, int bufnum,
168static int build_header(struct sk_buff *skb, struct net_device *dev, 167static int build_header(struct sk_buff *skb, struct net_device *dev,
169 unsigned short type, uint8_t daddr) 168 unsigned short type, uint8_t daddr)
170{ 169{
171 struct arcnet_local *lp = netdev_priv(dev);
172 int hdr_size = ARC_HDR_SIZE + RFC1051_HDR_SIZE; 170 int hdr_size = ARC_HDR_SIZE + RFC1051_HDR_SIZE;
173 struct archdr *pkt = (struct archdr *) skb_push(skb, hdr_size); 171 struct archdr *pkt = (struct archdr *) skb_push(skb, hdr_size);
174 struct arc_rfc1051 *soft = &pkt->soft.rfc1051; 172 struct arc_rfc1051 *soft = &pkt->soft.rfc1051;
@@ -184,8 +182,8 @@ static int build_header(struct sk_buff *skb, struct net_device *dev,
184 default: 182 default:
185 BUGMSG(D_NORMAL, "RFC1051: I don't understand protocol %d (%Xh)\n", 183 BUGMSG(D_NORMAL, "RFC1051: I don't understand protocol %d (%Xh)\n",
186 type, type); 184 type, type);
187 lp->stats.tx_errors++; 185 dev->stats.tx_errors++;
188 lp->stats.tx_aborted_errors++; 186 dev->stats.tx_aborted_errors++;
189 return 0; 187 return 0;
190 } 188 }
191 189
diff --git a/drivers/net/arcnet/rfc1201.c b/drivers/net/arcnet/rfc1201.c
index 2303d3a1f4b6..745530651c45 100644
--- a/drivers/net/arcnet/rfc1201.c
+++ b/drivers/net/arcnet/rfc1201.c
@@ -92,7 +92,6 @@ static __be16 type_trans(struct sk_buff *skb, struct net_device *dev)
92{ 92{
93 struct archdr *pkt = (struct archdr *) skb->data; 93 struct archdr *pkt = (struct archdr *) skb->data;
94 struct arc_rfc1201 *soft = &pkt->soft.rfc1201; 94 struct arc_rfc1201 *soft = &pkt->soft.rfc1201;
95 struct arcnet_local *lp = netdev_priv(dev);
96 int hdr_size = ARC_HDR_SIZE + RFC1201_HDR_SIZE; 95 int hdr_size = ARC_HDR_SIZE + RFC1201_HDR_SIZE;
97 96
98 /* Pull off the arcnet header. */ 97 /* Pull off the arcnet header. */
@@ -121,8 +120,8 @@ static __be16 type_trans(struct sk_buff *skb, struct net_device *dev)
121 case ARC_P_NOVELL_EC: 120 case ARC_P_NOVELL_EC:
122 return htons(ETH_P_802_3); 121 return htons(ETH_P_802_3);
123 default: 122 default:
124 lp->stats.rx_errors++; 123 dev->stats.rx_errors++;
125 lp->stats.rx_crc_errors++; 124 dev->stats.rx_crc_errors++;
126 return 0; 125 return 0;
127 } 126 }
128 127
@@ -172,8 +171,8 @@ static void rx(struct net_device *dev, int bufnum,
172 in->sequence, soft->split_flag, soft->sequence); 171 in->sequence, soft->split_flag, soft->sequence);
173 lp->rfc1201.aborted_seq = soft->sequence; 172 lp->rfc1201.aborted_seq = soft->sequence;
174 dev_kfree_skb_irq(in->skb); 173 dev_kfree_skb_irq(in->skb);
175 lp->stats.rx_errors++; 174 dev->stats.rx_errors++;
176 lp->stats.rx_missed_errors++; 175 dev->stats.rx_missed_errors++;
177 in->skb = NULL; 176 in->skb = NULL;
178 } 177 }
179 in->sequence = soft->sequence; 178 in->sequence = soft->sequence;
@@ -181,7 +180,7 @@ static void rx(struct net_device *dev, int bufnum,
181 skb = alloc_skb(length + ARC_HDR_SIZE, GFP_ATOMIC); 180 skb = alloc_skb(length + ARC_HDR_SIZE, GFP_ATOMIC);
182 if (skb == NULL) { 181 if (skb == NULL) {
183 BUGMSG(D_NORMAL, "Memory squeeze, dropping packet.\n"); 182 BUGMSG(D_NORMAL, "Memory squeeze, dropping packet.\n");
184 lp->stats.rx_dropped++; 183 dev->stats.rx_dropped++;
185 return; 184 return;
186 } 185 }
187 skb_put(skb, length + ARC_HDR_SIZE); 186 skb_put(skb, length + ARC_HDR_SIZE);
@@ -213,7 +212,7 @@ static void rx(struct net_device *dev, int bufnum,
213 BUGMSG(D_EXTRA, 212 BUGMSG(D_EXTRA,
214 "ARP source address was 00h, set to %02Xh.\n", 213 "ARP source address was 00h, set to %02Xh.\n",
215 saddr); 214 saddr);
216 lp->stats.rx_crc_errors++; 215 dev->stats.rx_crc_errors++;
217 *cptr = saddr; 216 *cptr = saddr;
218 } else { 217 } else {
219 BUGMSG(D_DURING, "ARP source address (%Xh) is fine.\n", 218 BUGMSG(D_DURING, "ARP source address (%Xh) is fine.\n",
@@ -222,8 +221,8 @@ static void rx(struct net_device *dev, int bufnum,
222 } else { 221 } else {
223 BUGMSG(D_NORMAL, "funny-shaped ARP packet. (%Xh, %Xh)\n", 222 BUGMSG(D_NORMAL, "funny-shaped ARP packet. (%Xh, %Xh)\n",
224 arp->ar_hln, arp->ar_pln); 223 arp->ar_hln, arp->ar_pln);
225 lp->stats.rx_errors++; 224 dev->stats.rx_errors++;
226 lp->stats.rx_crc_errors++; 225 dev->stats.rx_crc_errors++;
227 } 226 }
228 } 227 }
229 BUGLVL(D_SKB) arcnet_dump_skb(dev, skb, "rx"); 228 BUGLVL(D_SKB) arcnet_dump_skb(dev, skb, "rx");
@@ -257,8 +256,8 @@ static void rx(struct net_device *dev, int bufnum,
257 soft->split_flag); 256 soft->split_flag);
258 dev_kfree_skb_irq(in->skb); 257 dev_kfree_skb_irq(in->skb);
259 in->skb = NULL; 258 in->skb = NULL;
260 lp->stats.rx_errors++; 259 dev->stats.rx_errors++;
261 lp->stats.rx_missed_errors++; 260 dev->stats.rx_missed_errors++;
262 in->lastpacket = in->numpackets = 0; 261 in->lastpacket = in->numpackets = 0;
263 } 262 }
264 if (soft->split_flag & 1) { /* first packet in split */ 263 if (soft->split_flag & 1) { /* first packet in split */
@@ -269,8 +268,8 @@ static void rx(struct net_device *dev, int bufnum,
269 "(splitflag=%d, seq=%d)\n", 268 "(splitflag=%d, seq=%d)\n",
270 in->sequence, soft->split_flag, 269 in->sequence, soft->split_flag,
271 soft->sequence); 270 soft->sequence);
272 lp->stats.rx_errors++; 271 dev->stats.rx_errors++;
273 lp->stats.rx_missed_errors++; 272 dev->stats.rx_missed_errors++;
274 dev_kfree_skb_irq(in->skb); 273 dev_kfree_skb_irq(in->skb);
275 } 274 }
276 in->sequence = soft->sequence; 275 in->sequence = soft->sequence;
@@ -281,8 +280,8 @@ static void rx(struct net_device *dev, int bufnum,
281 BUGMSG(D_EXTRA, "incoming packet more than 16 segments; dropping. (splitflag=%d)\n", 280 BUGMSG(D_EXTRA, "incoming packet more than 16 segments; dropping. (splitflag=%d)\n",
282 soft->split_flag); 281 soft->split_flag);
283 lp->rfc1201.aborted_seq = soft->sequence; 282 lp->rfc1201.aborted_seq = soft->sequence;
284 lp->stats.rx_errors++; 283 dev->stats.rx_errors++;
285 lp->stats.rx_length_errors++; 284 dev->stats.rx_length_errors++;
286 return; 285 return;
287 } 286 }
288 in->skb = skb = alloc_skb(508 * in->numpackets + ARC_HDR_SIZE, 287 in->skb = skb = alloc_skb(508 * in->numpackets + ARC_HDR_SIZE,
@@ -290,7 +289,7 @@ static void rx(struct net_device *dev, int bufnum,
290 if (skb == NULL) { 289 if (skb == NULL) {
291 BUGMSG(D_NORMAL, "(split) memory squeeze, dropping packet.\n"); 290 BUGMSG(D_NORMAL, "(split) memory squeeze, dropping packet.\n");
292 lp->rfc1201.aborted_seq = soft->sequence; 291 lp->rfc1201.aborted_seq = soft->sequence;
293 lp->stats.rx_dropped++; 292 dev->stats.rx_dropped++;
294 return; 293 return;
295 } 294 }
296 skb->dev = dev; 295 skb->dev = dev;
@@ -314,8 +313,8 @@ static void rx(struct net_device *dev, int bufnum,
314 "first! (splitflag=%d, seq=%d, aborted=%d)\n", 313 "first! (splitflag=%d, seq=%d, aborted=%d)\n",
315 soft->split_flag, soft->sequence, 314 soft->split_flag, soft->sequence,
316 lp->rfc1201.aborted_seq); 315 lp->rfc1201.aborted_seq);
317 lp->stats.rx_errors++; 316 dev->stats.rx_errors++;
318 lp->stats.rx_missed_errors++; 317 dev->stats.rx_missed_errors++;
319 } 318 }
320 return; 319 return;
321 } 320 }
@@ -325,8 +324,8 @@ static void rx(struct net_device *dev, int bufnum,
325 if (packetnum <= in->lastpacket - 1) { 324 if (packetnum <= in->lastpacket - 1) {
326 BUGMSG(D_EXTRA, "duplicate splitpacket ignored! (splitflag=%d)\n", 325 BUGMSG(D_EXTRA, "duplicate splitpacket ignored! (splitflag=%d)\n",
327 soft->split_flag); 326 soft->split_flag);
328 lp->stats.rx_errors++; 327 dev->stats.rx_errors++;
329 lp->stats.rx_frame_errors++; 328 dev->stats.rx_frame_errors++;
330 return; 329 return;
331 } 330 }
332 /* "bad" duplicate, kill reassembly */ 331 /* "bad" duplicate, kill reassembly */
@@ -336,8 +335,8 @@ static void rx(struct net_device *dev, int bufnum,
336 lp->rfc1201.aborted_seq = soft->sequence; 335 lp->rfc1201.aborted_seq = soft->sequence;
337 dev_kfree_skb_irq(in->skb); 336 dev_kfree_skb_irq(in->skb);
338 in->skb = NULL; 337 in->skb = NULL;
339 lp->stats.rx_errors++; 338 dev->stats.rx_errors++;
340 lp->stats.rx_missed_errors++; 339 dev->stats.rx_missed_errors++;
341 in->lastpacket = in->numpackets = 0; 340 in->lastpacket = in->numpackets = 0;
342 return; 341 return;
343 } 342 }
@@ -404,8 +403,8 @@ static int build_header(struct sk_buff *skb, struct net_device *dev,
404 default: 403 default:
405 BUGMSG(D_NORMAL, "RFC1201: I don't understand protocol %d (%Xh)\n", 404 BUGMSG(D_NORMAL, "RFC1201: I don't understand protocol %d (%Xh)\n",
406 type, type); 405 type, type);
407 lp->stats.tx_errors++; 406 dev->stats.tx_errors++;
408 lp->stats.tx_aborted_errors++; 407 dev->stats.tx_aborted_errors++;
409 return 0; 408 return 0;
410 } 409 }
411 410
diff --git a/drivers/net/arm/ep93xx_eth.c b/drivers/net/arm/ep93xx_eth.c
index 3ec20cc18b0c..cc7708775da0 100644
--- a/drivers/net/arm/ep93xx_eth.c
+++ b/drivers/net/arm/ep93xx_eth.c
@@ -298,7 +298,7 @@ poll_some_more:
298 int more = 0; 298 int more = 0;
299 299
300 spin_lock_irq(&ep->rx_lock); 300 spin_lock_irq(&ep->rx_lock);
301 __netif_rx_complete(napi); 301 __napi_complete(napi);
302 wrl(ep, REG_INTEN, REG_INTEN_TX | REG_INTEN_RX); 302 wrl(ep, REG_INTEN, REG_INTEN_TX | REG_INTEN_RX);
303 if (ep93xx_have_more_rx(ep)) { 303 if (ep93xx_have_more_rx(ep)) {
304 wrl(ep, REG_INTEN, REG_INTEN_TX); 304 wrl(ep, REG_INTEN, REG_INTEN_TX);
@@ -307,7 +307,7 @@ poll_some_more:
307 } 307 }
308 spin_unlock_irq(&ep->rx_lock); 308 spin_unlock_irq(&ep->rx_lock);
309 309
310 if (more && netif_rx_reschedule(napi)) 310 if (more && napi_reschedule(napi))
311 goto poll_some_more; 311 goto poll_some_more;
312 } 312 }
313 313
@@ -415,9 +415,9 @@ static irqreturn_t ep93xx_irq(int irq, void *dev_id)
415 415
416 if (status & REG_INTSTS_RX) { 416 if (status & REG_INTSTS_RX) {
417 spin_lock(&ep->rx_lock); 417 spin_lock(&ep->rx_lock);
418 if (likely(netif_rx_schedule_prep(&ep->napi))) { 418 if (likely(napi_schedule_prep(&ep->napi))) {
419 wrl(ep, REG_INTEN, REG_INTEN_TX); 419 wrl(ep, REG_INTEN, REG_INTEN_TX);
420 __netif_rx_schedule(&ep->napi); 420 __napi_schedule(&ep->napi);
421 } 421 }
422 spin_unlock(&ep->rx_lock); 422 spin_unlock(&ep->rx_lock);
423 } 423 }
diff --git a/drivers/net/arm/ixp4xx_eth.c b/drivers/net/arm/ixp4xx_eth.c
index 5fce1d5c1a1a..5fe17d5eaa54 100644
--- a/drivers/net/arm/ixp4xx_eth.c
+++ b/drivers/net/arm/ixp4xx_eth.c
@@ -473,7 +473,7 @@ static void eth_rx_irq(void *pdev)
473 printk(KERN_DEBUG "%s: eth_rx_irq\n", dev->name); 473 printk(KERN_DEBUG "%s: eth_rx_irq\n", dev->name);
474#endif 474#endif
475 qmgr_disable_irq(port->plat->rxq); 475 qmgr_disable_irq(port->plat->rxq);
476 netif_rx_schedule(&port->napi); 476 napi_schedule(&port->napi);
477} 477}
478 478
479static int eth_poll(struct napi_struct *napi, int budget) 479static int eth_poll(struct napi_struct *napi, int budget)
@@ -498,16 +498,16 @@ static int eth_poll(struct napi_struct *napi, int budget)
498 498
499 if ((n = queue_get_desc(rxq, port, 0)) < 0) { 499 if ((n = queue_get_desc(rxq, port, 0)) < 0) {
500#if DEBUG_RX 500#if DEBUG_RX
501 printk(KERN_DEBUG "%s: eth_poll netif_rx_complete\n", 501 printk(KERN_DEBUG "%s: eth_poll napi_complete\n",
502 dev->name); 502 dev->name);
503#endif 503#endif
504 netif_rx_complete(napi); 504 napi_complete(napi);
505 qmgr_enable_irq(rxq); 505 qmgr_enable_irq(rxq);
506 if (!qmgr_stat_empty(rxq) && 506 if (!qmgr_stat_empty(rxq) &&
507 netif_rx_reschedule(napi)) { 507 napi_reschedule(napi)) {
508#if DEBUG_RX 508#if DEBUG_RX
509 printk(KERN_DEBUG "%s: eth_poll" 509 printk(KERN_DEBUG "%s: eth_poll"
510 " netif_rx_reschedule successed\n", 510 " napi_reschedule successed\n",
511 dev->name); 511 dev->name);
512#endif 512#endif
513 qmgr_disable_irq(rxq); 513 qmgr_disable_irq(rxq);
@@ -1036,7 +1036,7 @@ static int eth_open(struct net_device *dev)
1036 } 1036 }
1037 ports_open++; 1037 ports_open++;
1038 /* we may already have RX data, enables IRQ */ 1038 /* we may already have RX data, enables IRQ */
1039 netif_rx_schedule(&port->napi); 1039 napi_schedule(&port->napi);
1040 return 0; 1040 return 0;
1041} 1041}
1042 1042
diff --git a/drivers/net/atl1e/atl1e_main.c b/drivers/net/atl1e/atl1e_main.c
index bb9094d4cbc9..c758884728a5 100644
--- a/drivers/net/atl1e/atl1e_main.c
+++ b/drivers/net/atl1e/atl1e_main.c
@@ -1326,9 +1326,9 @@ static irqreturn_t atl1e_intr(int irq, void *data)
1326 AT_WRITE_REG(hw, REG_IMR, 1326 AT_WRITE_REG(hw, REG_IMR,
1327 IMR_NORMAL_MASK & ~ISR_RX_EVENT); 1327 IMR_NORMAL_MASK & ~ISR_RX_EVENT);
1328 AT_WRITE_FLUSH(hw); 1328 AT_WRITE_FLUSH(hw);
1329 if (likely(netif_rx_schedule_prep( 1329 if (likely(napi_schedule_prep(
1330 &adapter->napi))) 1330 &adapter->napi)))
1331 __netif_rx_schedule(&adapter->napi); 1331 __napi_schedule(&adapter->napi);
1332 } 1332 }
1333 } while (--max_ints > 0); 1333 } while (--max_ints > 0);
1334 /* re-enable Interrupt*/ 1334 /* re-enable Interrupt*/
@@ -1514,7 +1514,7 @@ static int atl1e_clean(struct napi_struct *napi, int budget)
1514 /* If no Tx and not enough Rx work done, exit the polling mode */ 1514 /* If no Tx and not enough Rx work done, exit the polling mode */
1515 if (work_done < budget) { 1515 if (work_done < budget) {
1516quit_polling: 1516quit_polling:
1517 netif_rx_complete(napi); 1517 napi_complete(napi);
1518 imr_data = AT_READ_REG(&adapter->hw, REG_IMR); 1518 imr_data = AT_READ_REG(&adapter->hw, REG_IMR);
1519 AT_WRITE_REG(&adapter->hw, REG_IMR, imr_data | ISR_RX_EVENT); 1519 AT_WRITE_REG(&adapter->hw, REG_IMR, imr_data | ISR_RX_EVENT);
1520 /* test debug */ 1520 /* test debug */
diff --git a/drivers/net/au1000_eth.c b/drivers/net/au1000_eth.c
index 9c875bb3f76c..6d76ccb8e296 100644
--- a/drivers/net/au1000_eth.c
+++ b/drivers/net/au1000_eth.c
@@ -81,24 +81,6 @@ MODULE_AUTHOR(DRV_AUTHOR);
81MODULE_DESCRIPTION(DRV_DESC); 81MODULE_DESCRIPTION(DRV_DESC);
82MODULE_LICENSE("GPL"); 82MODULE_LICENSE("GPL");
83 83
84// prototypes
85static void hard_stop(struct net_device *);
86static void enable_rx_tx(struct net_device *dev);
87static struct net_device * au1000_probe(int port_num);
88static int au1000_init(struct net_device *);
89static int au1000_open(struct net_device *);
90static int au1000_close(struct net_device *);
91static int au1000_tx(struct sk_buff *, struct net_device *);
92static int au1000_rx(struct net_device *);
93static irqreturn_t au1000_interrupt(int, void *);
94static void au1000_tx_timeout(struct net_device *);
95static void set_rx_mode(struct net_device *);
96static int au1000_ioctl(struct net_device *, struct ifreq *, int);
97static int au1000_mdio_read(struct net_device *, int, int);
98static void au1000_mdio_write(struct net_device *, int, int, u16);
99static void au1000_adjust_link(struct net_device *);
100static void enable_mac(struct net_device *, int);
101
102/* 84/*
103 * Theory of operation 85 * Theory of operation
104 * 86 *
@@ -188,6 +170,26 @@ struct au1000_private *au_macs[NUM_ETH_INTERFACES];
188# error MAC0-associated PHY attached 2nd MACs MII bus not supported yet 170# error MAC0-associated PHY attached 2nd MACs MII bus not supported yet
189#endif 171#endif
190 172
173static void enable_mac(struct net_device *dev, int force_reset)
174{
175 unsigned long flags;
176 struct au1000_private *aup = netdev_priv(dev);
177
178 spin_lock_irqsave(&aup->lock, flags);
179
180 if(force_reset || (!aup->mac_enabled)) {
181 *aup->enable = MAC_EN_CLOCK_ENABLE;
182 au_sync_delay(2);
183 *aup->enable = (MAC_EN_RESET0 | MAC_EN_RESET1 | MAC_EN_RESET2
184 | MAC_EN_CLOCK_ENABLE);
185 au_sync_delay(2);
186
187 aup->mac_enabled = 1;
188 }
189
190 spin_unlock_irqrestore(&aup->lock, flags);
191}
192
191/* 193/*
192 * MII operations 194 * MII operations
193 */ 195 */
@@ -281,6 +283,107 @@ static int au1000_mdiobus_reset(struct mii_bus *bus)
281 return 0; 283 return 0;
282} 284}
283 285
286static void hard_stop(struct net_device *dev)
287{
288 struct au1000_private *aup = netdev_priv(dev);
289
290 if (au1000_debug > 4)
291 printk(KERN_INFO "%s: hard stop\n", dev->name);
292
293 aup->mac->control &= ~(MAC_RX_ENABLE | MAC_TX_ENABLE);
294 au_sync_delay(10);
295}
296
297static void enable_rx_tx(struct net_device *dev)
298{
299 struct au1000_private *aup = netdev_priv(dev);
300
301 if (au1000_debug > 4)
302 printk(KERN_INFO "%s: enable_rx_tx\n", dev->name);
303
304 aup->mac->control |= (MAC_RX_ENABLE | MAC_TX_ENABLE);
305 au_sync_delay(10);
306}
307
308static void
309au1000_adjust_link(struct net_device *dev)
310{
311 struct au1000_private *aup = netdev_priv(dev);
312 struct phy_device *phydev = aup->phy_dev;
313 unsigned long flags;
314
315 int status_change = 0;
316
317 BUG_ON(!aup->phy_dev);
318
319 spin_lock_irqsave(&aup->lock, flags);
320
321 if (phydev->link && (aup->old_speed != phydev->speed)) {
322 // speed changed
323
324 switch(phydev->speed) {
325 case SPEED_10:
326 case SPEED_100:
327 break;
328 default:
329 printk(KERN_WARNING
330 "%s: Speed (%d) is not 10/100 ???\n",
331 dev->name, phydev->speed);
332 break;
333 }
334
335 aup->old_speed = phydev->speed;
336
337 status_change = 1;
338 }
339
340 if (phydev->link && (aup->old_duplex != phydev->duplex)) {
341 // duplex mode changed
342
343 /* switching duplex mode requires to disable rx and tx! */
344 hard_stop(dev);
345
346 if (DUPLEX_FULL == phydev->duplex)
347 aup->mac->control = ((aup->mac->control
348 | MAC_FULL_DUPLEX)
349 & ~MAC_DISABLE_RX_OWN);
350 else
351 aup->mac->control = ((aup->mac->control
352 & ~MAC_FULL_DUPLEX)
353 | MAC_DISABLE_RX_OWN);
354 au_sync_delay(1);
355
356 enable_rx_tx(dev);
357 aup->old_duplex = phydev->duplex;
358
359 status_change = 1;
360 }
361
362 if(phydev->link != aup->old_link) {
363 // link state changed
364
365 if (!phydev->link) {
366 /* link went down */
367 aup->old_speed = 0;
368 aup->old_duplex = -1;
369 }
370
371 aup->old_link = phydev->link;
372 status_change = 1;
373 }
374
375 spin_unlock_irqrestore(&aup->lock, flags);
376
377 if (status_change) {
378 if (phydev->link)
379 printk(KERN_INFO "%s: link up (%d/%s)\n",
380 dev->name, phydev->speed,
381 DUPLEX_FULL == phydev->duplex ? "Full" : "Half");
382 else
383 printk(KERN_INFO "%s: link down\n", dev->name);
384 }
385}
386
284static int mii_probe (struct net_device *dev) 387static int mii_probe (struct net_device *dev)
285{ 388{
286 struct au1000_private *const aup = netdev_priv(dev); 389 struct au1000_private *const aup = netdev_priv(dev);
@@ -412,48 +515,6 @@ void ReleaseDB(struct au1000_private *aup, db_dest_t *pDB)
412 aup->pDBfree = pDB; 515 aup->pDBfree = pDB;
413} 516}
414 517
415static void enable_rx_tx(struct net_device *dev)
416{
417 struct au1000_private *aup = netdev_priv(dev);
418
419 if (au1000_debug > 4)
420 printk(KERN_INFO "%s: enable_rx_tx\n", dev->name);
421
422 aup->mac->control |= (MAC_RX_ENABLE | MAC_TX_ENABLE);
423 au_sync_delay(10);
424}
425
426static void hard_stop(struct net_device *dev)
427{
428 struct au1000_private *aup = netdev_priv(dev);
429
430 if (au1000_debug > 4)
431 printk(KERN_INFO "%s: hard stop\n", dev->name);
432
433 aup->mac->control &= ~(MAC_RX_ENABLE | MAC_TX_ENABLE);
434 au_sync_delay(10);
435}
436
437static void enable_mac(struct net_device *dev, int force_reset)
438{
439 unsigned long flags;
440 struct au1000_private *aup = netdev_priv(dev);
441
442 spin_lock_irqsave(&aup->lock, flags);
443
444 if(force_reset || (!aup->mac_enabled)) {
445 *aup->enable = MAC_EN_CLOCK_ENABLE;
446 au_sync_delay(2);
447 *aup->enable = (MAC_EN_RESET0 | MAC_EN_RESET1 | MAC_EN_RESET2
448 | MAC_EN_CLOCK_ENABLE);
449 au_sync_delay(2);
450
451 aup->mac_enabled = 1;
452 }
453
454 spin_unlock_irqrestore(&aup->lock, flags);
455}
456
457static void reset_mac_unlocked(struct net_device *dev) 518static void reset_mac_unlocked(struct net_device *dev)
458{ 519{
459 struct au1000_private *const aup = netdev_priv(dev); 520 struct au1000_private *const aup = netdev_priv(dev);
@@ -542,30 +603,6 @@ static struct {
542static int num_ifs; 603static int num_ifs;
543 604
544/* 605/*
545 * Setup the base address and interrupt of the Au1xxx ethernet macs
546 * based on cpu type and whether the interface is enabled in sys_pinfunc
547 * register. The last interface is enabled if SYS_PF_NI2 (bit 4) is 0.
548 */
549static int __init au1000_init_module(void)
550{
551 int ni = (int)((au_readl(SYS_PINFUNC) & (u32)(SYS_PF_NI2)) >> 4);
552 struct net_device *dev;
553 int i, found_one = 0;
554
555 num_ifs = NUM_ETH_INTERFACES - ni;
556
557 for(i = 0; i < num_ifs; i++) {
558 dev = au1000_probe(i);
559 iflist[i].dev = dev;
560 if (dev)
561 found_one++;
562 }
563 if (!found_one)
564 return -ENODEV;
565 return 0;
566}
567
568/*
569 * ethtool operations 606 * ethtool operations
570 */ 607 */
571 608
@@ -611,199 +648,6 @@ static const struct ethtool_ops au1000_ethtool_ops = {
611 .get_link = ethtool_op_get_link, 648 .get_link = ethtool_op_get_link,
612}; 649};
613 650
614static struct net_device * au1000_probe(int port_num)
615{
616 static unsigned version_printed = 0;
617 struct au1000_private *aup = NULL;
618 struct net_device *dev = NULL;
619 db_dest_t *pDB, *pDBfree;
620 char ethaddr[6];
621 int irq, i, err;
622 u32 base, macen;
623
624 if (port_num >= NUM_ETH_INTERFACES)
625 return NULL;
626
627 base = CPHYSADDR(iflist[port_num].base_addr );
628 macen = CPHYSADDR(iflist[port_num].macen_addr);
629 irq = iflist[port_num].irq;
630
631 if (!request_mem_region( base, MAC_IOSIZE, "Au1x00 ENET") ||
632 !request_mem_region(macen, 4, "Au1x00 ENET"))
633 return NULL;
634
635 if (version_printed++ == 0)
636 printk("%s version %s %s\n", DRV_NAME, DRV_VERSION, DRV_AUTHOR);
637
638 dev = alloc_etherdev(sizeof(struct au1000_private));
639 if (!dev) {
640 printk(KERN_ERR "%s: alloc_etherdev failed\n", DRV_NAME);
641 return NULL;
642 }
643
644 if ((err = register_netdev(dev)) != 0) {
645 printk(KERN_ERR "%s: Cannot register net device, error %d\n",
646 DRV_NAME, err);
647 free_netdev(dev);
648 return NULL;
649 }
650
651 printk("%s: Au1xx0 Ethernet found at 0x%x, irq %d\n",
652 dev->name, base, irq);
653
654 aup = netdev_priv(dev);
655
656 spin_lock_init(&aup->lock);
657
658 /* Allocate the data buffers */
659 /* Snooping works fine with eth on all au1xxx */
660 aup->vaddr = (u32)dma_alloc_noncoherent(NULL, MAX_BUF_SIZE *
661 (NUM_TX_BUFFS + NUM_RX_BUFFS),
662 &aup->dma_addr, 0);
663 if (!aup->vaddr) {
664 free_netdev(dev);
665 release_mem_region( base, MAC_IOSIZE);
666 release_mem_region(macen, 4);
667 return NULL;
668 }
669
670 /* aup->mac is the base address of the MAC's registers */
671 aup->mac = (volatile mac_reg_t *)iflist[port_num].base_addr;
672
673 /* Setup some variables for quick register address access */
674 aup->enable = (volatile u32 *)iflist[port_num].macen_addr;
675 aup->mac_id = port_num;
676 au_macs[port_num] = aup;
677
678 if (port_num == 0) {
679 if (prom_get_ethernet_addr(ethaddr) == 0)
680 memcpy(au1000_mac_addr, ethaddr, sizeof(au1000_mac_addr));
681 else {
682 printk(KERN_INFO "%s: No MAC address found\n",
683 dev->name);
684 /* Use the hard coded MAC addresses */
685 }
686
687 setup_hw_rings(aup, MAC0_RX_DMA_ADDR, MAC0_TX_DMA_ADDR);
688 } else if (port_num == 1)
689 setup_hw_rings(aup, MAC1_RX_DMA_ADDR, MAC1_TX_DMA_ADDR);
690
691 /*
692 * Assign to the Ethernet ports two consecutive MAC addresses
693 * to match those that are printed on their stickers
694 */
695 memcpy(dev->dev_addr, au1000_mac_addr, sizeof(au1000_mac_addr));
696 dev->dev_addr[5] += port_num;
697
698 *aup->enable = 0;
699 aup->mac_enabled = 0;
700
701 aup->mii_bus = mdiobus_alloc();
702 if (aup->mii_bus == NULL)
703 goto err_out;
704
705 aup->mii_bus->priv = dev;
706 aup->mii_bus->read = au1000_mdiobus_read;
707 aup->mii_bus->write = au1000_mdiobus_write;
708 aup->mii_bus->reset = au1000_mdiobus_reset;
709 aup->mii_bus->name = "au1000_eth_mii";
710 snprintf(aup->mii_bus->id, MII_BUS_ID_SIZE, "%x", aup->mac_id);
711 aup->mii_bus->irq = kmalloc(sizeof(int)*PHY_MAX_ADDR, GFP_KERNEL);
712 for(i = 0; i < PHY_MAX_ADDR; ++i)
713 aup->mii_bus->irq[i] = PHY_POLL;
714
715 /* if known, set corresponding PHY IRQs */
716#if defined(AU1XXX_PHY_STATIC_CONFIG)
717# if defined(AU1XXX_PHY0_IRQ)
718 if (AU1XXX_PHY0_BUSID == aup->mac_id)
719 aup->mii_bus->irq[AU1XXX_PHY0_ADDR] = AU1XXX_PHY0_IRQ;
720# endif
721# if defined(AU1XXX_PHY1_IRQ)
722 if (AU1XXX_PHY1_BUSID == aup->mac_id)
723 aup->mii_bus->irq[AU1XXX_PHY1_ADDR] = AU1XXX_PHY1_IRQ;
724# endif
725#endif
726 mdiobus_register(aup->mii_bus);
727
728 if (mii_probe(dev) != 0) {
729 goto err_out;
730 }
731
732 pDBfree = NULL;
733 /* setup the data buffer descriptors and attach a buffer to each one */
734 pDB = aup->db;
735 for (i = 0; i < (NUM_TX_BUFFS+NUM_RX_BUFFS); i++) {
736 pDB->pnext = pDBfree;
737 pDBfree = pDB;
738 pDB->vaddr = (u32 *)((unsigned)aup->vaddr + MAX_BUF_SIZE*i);
739 pDB->dma_addr = (dma_addr_t)virt_to_bus(pDB->vaddr);
740 pDB++;
741 }
742 aup->pDBfree = pDBfree;
743
744 for (i = 0; i < NUM_RX_DMA; i++) {
745 pDB = GetFreeDB(aup);
746 if (!pDB) {
747 goto err_out;
748 }
749 aup->rx_dma_ring[i]->buff_stat = (unsigned)pDB->dma_addr;
750 aup->rx_db_inuse[i] = pDB;
751 }
752 for (i = 0; i < NUM_TX_DMA; i++) {
753 pDB = GetFreeDB(aup);
754 if (!pDB) {
755 goto err_out;
756 }
757 aup->tx_dma_ring[i]->buff_stat = (unsigned)pDB->dma_addr;
758 aup->tx_dma_ring[i]->len = 0;
759 aup->tx_db_inuse[i] = pDB;
760 }
761
762 dev->base_addr = base;
763 dev->irq = irq;
764 dev->open = au1000_open;
765 dev->hard_start_xmit = au1000_tx;
766 dev->stop = au1000_close;
767 dev->set_multicast_list = &set_rx_mode;
768 dev->do_ioctl = &au1000_ioctl;
769 SET_ETHTOOL_OPS(dev, &au1000_ethtool_ops);
770 dev->tx_timeout = au1000_tx_timeout;
771 dev->watchdog_timeo = ETH_TX_TIMEOUT;
772
773 /*
774 * The boot code uses the ethernet controller, so reset it to start
775 * fresh. au1000_init() expects that the device is in reset state.
776 */
777 reset_mac(dev);
778
779 return dev;
780
781err_out:
782 if (aup->mii_bus != NULL) {
783 mdiobus_unregister(aup->mii_bus);
784 mdiobus_free(aup->mii_bus);
785 }
786
787 /* here we should have a valid dev plus aup-> register addresses
788 * so we can reset the mac properly.*/
789 reset_mac(dev);
790
791 for (i = 0; i < NUM_RX_DMA; i++) {
792 if (aup->rx_db_inuse[i])
793 ReleaseDB(aup, aup->rx_db_inuse[i]);
794 }
795 for (i = 0; i < NUM_TX_DMA; i++) {
796 if (aup->tx_db_inuse[i])
797 ReleaseDB(aup, aup->tx_db_inuse[i]);
798 }
799 dma_free_noncoherent(NULL, MAX_BUF_SIZE * (NUM_TX_BUFFS + NUM_RX_BUFFS),
800 (void *)aup->vaddr, aup->dma_addr);
801 unregister_netdev(dev);
802 free_netdev(dev);
803 release_mem_region( base, MAC_IOSIZE);
804 release_mem_region(macen, 4);
805 return NULL;
806}
807 651
808/* 652/*
809 * Initialize the interface. 653 * Initialize the interface.
@@ -864,83 +708,170 @@ static int au1000_init(struct net_device *dev)
864 return 0; 708 return 0;
865} 709}
866 710
867static void 711static inline void update_rx_stats(struct net_device *dev, u32 status)
868au1000_adjust_link(struct net_device *dev)
869{ 712{
870 struct au1000_private *aup = netdev_priv(dev); 713 struct au1000_private *aup = netdev_priv(dev);
871 struct phy_device *phydev = aup->phy_dev; 714 struct net_device_stats *ps = &dev->stats;
872 unsigned long flags;
873 715
874 int status_change = 0; 716 ps->rx_packets++;
717 if (status & RX_MCAST_FRAME)
718 ps->multicast++;
875 719
876 BUG_ON(!aup->phy_dev); 720 if (status & RX_ERROR) {
721 ps->rx_errors++;
722 if (status & RX_MISSED_FRAME)
723 ps->rx_missed_errors++;
724 if (status & (RX_OVERLEN | RX_OVERLEN | RX_LEN_ERROR))
725 ps->rx_length_errors++;
726 if (status & RX_CRC_ERROR)
727 ps->rx_crc_errors++;
728 if (status & RX_COLL)
729 ps->collisions++;
730 }
731 else
732 ps->rx_bytes += status & RX_FRAME_LEN_MASK;
877 733
878 spin_lock_irqsave(&aup->lock, flags); 734}
879 735
880 if (phydev->link && (aup->old_speed != phydev->speed)) { 736/*
881 // speed changed 737 * Au1000 receive routine.
738 */
739static int au1000_rx(struct net_device *dev)
740{
741 struct au1000_private *aup = netdev_priv(dev);
742 struct sk_buff *skb;
743 volatile rx_dma_t *prxd;
744 u32 buff_stat, status;
745 db_dest_t *pDB;
746 u32 frmlen;
882 747
883 switch(phydev->speed) { 748 if (au1000_debug > 5)
884 case SPEED_10: 749 printk("%s: au1000_rx head %d\n", dev->name, aup->rx_head);
885 case SPEED_100:
886 break;
887 default:
888 printk(KERN_WARNING
889 "%s: Speed (%d) is not 10/100 ???\n",
890 dev->name, phydev->speed);
891 break;
892 }
893 750
894 aup->old_speed = phydev->speed; 751 prxd = aup->rx_dma_ring[aup->rx_head];
752 buff_stat = prxd->buff_stat;
753 while (buff_stat & RX_T_DONE) {
754 status = prxd->status;
755 pDB = aup->rx_db_inuse[aup->rx_head];
756 update_rx_stats(dev, status);
757 if (!(status & RX_ERROR)) {
895 758
896 status_change = 1; 759 /* good frame */
760 frmlen = (status & RX_FRAME_LEN_MASK);
761 frmlen -= 4; /* Remove FCS */
762 skb = dev_alloc_skb(frmlen + 2);
763 if (skb == NULL) {
764 printk(KERN_ERR
765 "%s: Memory squeeze, dropping packet.\n",
766 dev->name);
767 dev->stats.rx_dropped++;
768 continue;
769 }
770 skb_reserve(skb, 2); /* 16 byte IP header align */
771 skb_copy_to_linear_data(skb,
772 (unsigned char *)pDB->vaddr, frmlen);
773 skb_put(skb, frmlen);
774 skb->protocol = eth_type_trans(skb, dev);
775 netif_rx(skb); /* pass the packet to upper layers */
776 }
777 else {
778 if (au1000_debug > 4) {
779 if (status & RX_MISSED_FRAME)
780 printk("rx miss\n");
781 if (status & RX_WDOG_TIMER)
782 printk("rx wdog\n");
783 if (status & RX_RUNT)
784 printk("rx runt\n");
785 if (status & RX_OVERLEN)
786 printk("rx overlen\n");
787 if (status & RX_COLL)
788 printk("rx coll\n");
789 if (status & RX_MII_ERROR)
790 printk("rx mii error\n");
791 if (status & RX_CRC_ERROR)
792 printk("rx crc error\n");
793 if (status & RX_LEN_ERROR)
794 printk("rx len error\n");
795 if (status & RX_U_CNTRL_FRAME)
796 printk("rx u control frame\n");
797 if (status & RX_MISSED_FRAME)
798 printk("rx miss\n");
799 }
800 }
801 prxd->buff_stat = (u32)(pDB->dma_addr | RX_DMA_ENABLE);
802 aup->rx_head = (aup->rx_head + 1) & (NUM_RX_DMA - 1);
803 au_sync();
804
805 /* next descriptor */
806 prxd = aup->rx_dma_ring[aup->rx_head];
807 buff_stat = prxd->buff_stat;
897 } 808 }
809 return 0;
810}
898 811
899 if (phydev->link && (aup->old_duplex != phydev->duplex)) { 812static void update_tx_stats(struct net_device *dev, u32 status)
900 // duplex mode changed 813{
814 struct au1000_private *aup = netdev_priv(dev);
815 struct net_device_stats *ps = &dev->stats;
901 816
902 /* switching duplex mode requires to disable rx and tx! */ 817 if (status & TX_FRAME_ABORTED) {
903 hard_stop(dev); 818 if (!aup->phy_dev || (DUPLEX_FULL == aup->phy_dev->duplex)) {
819 if (status & (TX_JAB_TIMEOUT | TX_UNDERRUN)) {
820 /* any other tx errors are only valid
821 * in half duplex mode */
822 ps->tx_errors++;
823 ps->tx_aborted_errors++;
824 }
825 }
826 else {
827 ps->tx_errors++;
828 ps->tx_aborted_errors++;
829 if (status & (TX_NO_CARRIER | TX_LOSS_CARRIER))
830 ps->tx_carrier_errors++;
831 }
832 }
833}
904 834
905 if (DUPLEX_FULL == phydev->duplex) 835/*
906 aup->mac->control = ((aup->mac->control 836 * Called from the interrupt service routine to acknowledge
907 | MAC_FULL_DUPLEX) 837 * the TX DONE bits. This is a must if the irq is setup as
908 & ~MAC_DISABLE_RX_OWN); 838 * edge triggered.
909 else 839 */
910 aup->mac->control = ((aup->mac->control 840static void au1000_tx_ack(struct net_device *dev)
911 & ~MAC_FULL_DUPLEX) 841{
912 | MAC_DISABLE_RX_OWN); 842 struct au1000_private *aup = netdev_priv(dev);
913 au_sync_delay(1); 843 volatile tx_dma_t *ptxd;
914 844
915 enable_rx_tx(dev); 845 ptxd = aup->tx_dma_ring[aup->tx_tail];
916 aup->old_duplex = phydev->duplex;
917 846
918 status_change = 1; 847 while (ptxd->buff_stat & TX_T_DONE) {
919 } 848 update_tx_stats(dev, ptxd->status);
849 ptxd->buff_stat &= ~TX_T_DONE;
850 ptxd->len = 0;
851 au_sync();
920 852
921 if(phydev->link != aup->old_link) { 853 aup->tx_tail = (aup->tx_tail + 1) & (NUM_TX_DMA - 1);
922 // link state changed 854 ptxd = aup->tx_dma_ring[aup->tx_tail];
923 855
924 if (!phydev->link) { 856 if (aup->tx_full) {
925 /* link went down */ 857 aup->tx_full = 0;
926 aup->old_speed = 0; 858 netif_wake_queue(dev);
927 aup->old_duplex = -1;
928 } 859 }
929
930 aup->old_link = phydev->link;
931 status_change = 1;
932 } 860 }
861}
933 862
934 spin_unlock_irqrestore(&aup->lock, flags); 863/*
864 * Au1000 interrupt service routine.
865 */
866static irqreturn_t au1000_interrupt(int irq, void *dev_id)
867{
868 struct net_device *dev = dev_id;
935 869
936 if (status_change) { 870 /* Handle RX interrupts first to minimize chance of overrun */
937 if (phydev->link) 871
938 printk(KERN_INFO "%s: link up (%d/%s)\n", 872 au1000_rx(dev);
939 dev->name, phydev->speed, 873 au1000_tx_ack(dev);
940 DUPLEX_FULL == phydev->duplex ? "Full" : "Half"); 874 return IRQ_RETVAL(1);
941 else
942 printk(KERN_INFO "%s: link down\n", dev->name);
943 }
944} 875}
945 876
946static int au1000_open(struct net_device *dev) 877static int au1000_open(struct net_device *dev)
@@ -1003,88 +934,6 @@ static int au1000_close(struct net_device *dev)
1003 return 0; 934 return 0;
1004} 935}
1005 936
1006static void __exit au1000_cleanup_module(void)
1007{
1008 int i, j;
1009 struct net_device *dev;
1010 struct au1000_private *aup;
1011
1012 for (i = 0; i < num_ifs; i++) {
1013 dev = iflist[i].dev;
1014 if (dev) {
1015 aup = netdev_priv(dev);
1016 unregister_netdev(dev);
1017 mdiobus_unregister(aup->mii_bus);
1018 mdiobus_free(aup->mii_bus);
1019 for (j = 0; j < NUM_RX_DMA; j++)
1020 if (aup->rx_db_inuse[j])
1021 ReleaseDB(aup, aup->rx_db_inuse[j]);
1022 for (j = 0; j < NUM_TX_DMA; j++)
1023 if (aup->tx_db_inuse[j])
1024 ReleaseDB(aup, aup->tx_db_inuse[j]);
1025 dma_free_noncoherent(NULL, MAX_BUF_SIZE *
1026 (NUM_TX_BUFFS + NUM_RX_BUFFS),
1027 (void *)aup->vaddr, aup->dma_addr);
1028 release_mem_region(dev->base_addr, MAC_IOSIZE);
1029 release_mem_region(CPHYSADDR(iflist[i].macen_addr), 4);
1030 free_netdev(dev);
1031 }
1032 }
1033}
1034
1035static void update_tx_stats(struct net_device *dev, u32 status)
1036{
1037 struct au1000_private *aup = netdev_priv(dev);
1038 struct net_device_stats *ps = &dev->stats;
1039
1040 if (status & TX_FRAME_ABORTED) {
1041 if (!aup->phy_dev || (DUPLEX_FULL == aup->phy_dev->duplex)) {
1042 if (status & (TX_JAB_TIMEOUT | TX_UNDERRUN)) {
1043 /* any other tx errors are only valid
1044 * in half duplex mode */
1045 ps->tx_errors++;
1046 ps->tx_aborted_errors++;
1047 }
1048 }
1049 else {
1050 ps->tx_errors++;
1051 ps->tx_aborted_errors++;
1052 if (status & (TX_NO_CARRIER | TX_LOSS_CARRIER))
1053 ps->tx_carrier_errors++;
1054 }
1055 }
1056}
1057
1058
1059/*
1060 * Called from the interrupt service routine to acknowledge
1061 * the TX DONE bits. This is a must if the irq is setup as
1062 * edge triggered.
1063 */
1064static void au1000_tx_ack(struct net_device *dev)
1065{
1066 struct au1000_private *aup = netdev_priv(dev);
1067 volatile tx_dma_t *ptxd;
1068
1069 ptxd = aup->tx_dma_ring[aup->tx_tail];
1070
1071 while (ptxd->buff_stat & TX_T_DONE) {
1072 update_tx_stats(dev, ptxd->status);
1073 ptxd->buff_stat &= ~TX_T_DONE;
1074 ptxd->len = 0;
1075 au_sync();
1076
1077 aup->tx_tail = (aup->tx_tail + 1) & (NUM_TX_DMA - 1);
1078 ptxd = aup->tx_dma_ring[aup->tx_tail];
1079
1080 if (aup->tx_full) {
1081 aup->tx_full = 0;
1082 netif_wake_queue(dev);
1083 }
1084 }
1085}
1086
1087
1088/* 937/*
1089 * Au1000 transmit routine. 938 * Au1000 transmit routine.
1090 */ 939 */
@@ -1142,123 +991,6 @@ static int au1000_tx(struct sk_buff *skb, struct net_device *dev)
1142 return 0; 991 return 0;
1143} 992}
1144 993
1145static inline void update_rx_stats(struct net_device *dev, u32 status)
1146{
1147 struct au1000_private *aup = netdev_priv(dev);
1148 struct net_device_stats *ps = &dev->stats;
1149
1150 ps->rx_packets++;
1151 if (status & RX_MCAST_FRAME)
1152 ps->multicast++;
1153
1154 if (status & RX_ERROR) {
1155 ps->rx_errors++;
1156 if (status & RX_MISSED_FRAME)
1157 ps->rx_missed_errors++;
1158 if (status & (RX_OVERLEN | RX_OVERLEN | RX_LEN_ERROR))
1159 ps->rx_length_errors++;
1160 if (status & RX_CRC_ERROR)
1161 ps->rx_crc_errors++;
1162 if (status & RX_COLL)
1163 ps->collisions++;
1164 }
1165 else
1166 ps->rx_bytes += status & RX_FRAME_LEN_MASK;
1167
1168}
1169
1170/*
1171 * Au1000 receive routine.
1172 */
1173static int au1000_rx(struct net_device *dev)
1174{
1175 struct au1000_private *aup = netdev_priv(dev);
1176 struct sk_buff *skb;
1177 volatile rx_dma_t *prxd;
1178 u32 buff_stat, status;
1179 db_dest_t *pDB;
1180 u32 frmlen;
1181
1182 if (au1000_debug > 5)
1183 printk("%s: au1000_rx head %d\n", dev->name, aup->rx_head);
1184
1185 prxd = aup->rx_dma_ring[aup->rx_head];
1186 buff_stat = prxd->buff_stat;
1187 while (buff_stat & RX_T_DONE) {
1188 status = prxd->status;
1189 pDB = aup->rx_db_inuse[aup->rx_head];
1190 update_rx_stats(dev, status);
1191 if (!(status & RX_ERROR)) {
1192
1193 /* good frame */
1194 frmlen = (status & RX_FRAME_LEN_MASK);
1195 frmlen -= 4; /* Remove FCS */
1196 skb = dev_alloc_skb(frmlen + 2);
1197 if (skb == NULL) {
1198 printk(KERN_ERR
1199 "%s: Memory squeeze, dropping packet.\n",
1200 dev->name);
1201 dev->stats.rx_dropped++;
1202 continue;
1203 }
1204 skb_reserve(skb, 2); /* 16 byte IP header align */
1205 skb_copy_to_linear_data(skb,
1206 (unsigned char *)pDB->vaddr, frmlen);
1207 skb_put(skb, frmlen);
1208 skb->protocol = eth_type_trans(skb, dev);
1209 netif_rx(skb); /* pass the packet to upper layers */
1210 }
1211 else {
1212 if (au1000_debug > 4) {
1213 if (status & RX_MISSED_FRAME)
1214 printk("rx miss\n");
1215 if (status & RX_WDOG_TIMER)
1216 printk("rx wdog\n");
1217 if (status & RX_RUNT)
1218 printk("rx runt\n");
1219 if (status & RX_OVERLEN)
1220 printk("rx overlen\n");
1221 if (status & RX_COLL)
1222 printk("rx coll\n");
1223 if (status & RX_MII_ERROR)
1224 printk("rx mii error\n");
1225 if (status & RX_CRC_ERROR)
1226 printk("rx crc error\n");
1227 if (status & RX_LEN_ERROR)
1228 printk("rx len error\n");
1229 if (status & RX_U_CNTRL_FRAME)
1230 printk("rx u control frame\n");
1231 if (status & RX_MISSED_FRAME)
1232 printk("rx miss\n");
1233 }
1234 }
1235 prxd->buff_stat = (u32)(pDB->dma_addr | RX_DMA_ENABLE);
1236 aup->rx_head = (aup->rx_head + 1) & (NUM_RX_DMA - 1);
1237 au_sync();
1238
1239 /* next descriptor */
1240 prxd = aup->rx_dma_ring[aup->rx_head];
1241 buff_stat = prxd->buff_stat;
1242 }
1243 return 0;
1244}
1245
1246
1247/*
1248 * Au1000 interrupt service routine.
1249 */
1250static irqreturn_t au1000_interrupt(int irq, void *dev_id)
1251{
1252 struct net_device *dev = dev_id;
1253
1254 /* Handle RX interrupts first to minimize chance of overrun */
1255
1256 au1000_rx(dev);
1257 au1000_tx_ack(dev);
1258 return IRQ_RETVAL(1);
1259}
1260
1261
1262/* 994/*
1263 * The Tx ring has been full longer than the watchdog timeout 995 * The Tx ring has been full longer than the watchdog timeout
1264 * value. The transmitter must be hung? 996 * value. The transmitter must be hung?
@@ -1315,5 +1047,252 @@ static int au1000_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1315 return phy_mii_ioctl(aup->phy_dev, if_mii(rq), cmd); 1047 return phy_mii_ioctl(aup->phy_dev, if_mii(rq), cmd);
1316} 1048}
1317 1049
1050static struct net_device * au1000_probe(int port_num)
1051{
1052 static unsigned version_printed = 0;
1053 struct au1000_private *aup = NULL;
1054 struct net_device *dev = NULL;
1055 db_dest_t *pDB, *pDBfree;
1056 char ethaddr[6];
1057 int irq, i, err;
1058 u32 base, macen;
1059
1060 if (port_num >= NUM_ETH_INTERFACES)
1061 return NULL;
1062
1063 base = CPHYSADDR(iflist[port_num].base_addr );
1064 macen = CPHYSADDR(iflist[port_num].macen_addr);
1065 irq = iflist[port_num].irq;
1066
1067 if (!request_mem_region( base, MAC_IOSIZE, "Au1x00 ENET") ||
1068 !request_mem_region(macen, 4, "Au1x00 ENET"))
1069 return NULL;
1070
1071 if (version_printed++ == 0)
1072 printk("%s version %s %s\n", DRV_NAME, DRV_VERSION, DRV_AUTHOR);
1073
1074 dev = alloc_etherdev(sizeof(struct au1000_private));
1075 if (!dev) {
1076 printk(KERN_ERR "%s: alloc_etherdev failed\n", DRV_NAME);
1077 return NULL;
1078 }
1079
1080 if ((err = register_netdev(dev)) != 0) {
1081 printk(KERN_ERR "%s: Cannot register net device, error %d\n",
1082 DRV_NAME, err);
1083 free_netdev(dev);
1084 return NULL;
1085 }
1086
1087 printk("%s: Au1xx0 Ethernet found at 0x%x, irq %d\n",
1088 dev->name, base, irq);
1089
1090 aup = netdev_priv(dev);
1091
1092 spin_lock_init(&aup->lock);
1093
1094 /* Allocate the data buffers */
1095 /* Snooping works fine with eth on all au1xxx */
1096 aup->vaddr = (u32)dma_alloc_noncoherent(NULL, MAX_BUF_SIZE *
1097 (NUM_TX_BUFFS + NUM_RX_BUFFS),
1098 &aup->dma_addr, 0);
1099 if (!aup->vaddr) {
1100 free_netdev(dev);
1101 release_mem_region( base, MAC_IOSIZE);
1102 release_mem_region(macen, 4);
1103 return NULL;
1104 }
1105
1106 /* aup->mac is the base address of the MAC's registers */
1107 aup->mac = (volatile mac_reg_t *)iflist[port_num].base_addr;
1108
1109 /* Setup some variables for quick register address access */
1110 aup->enable = (volatile u32 *)iflist[port_num].macen_addr;
1111 aup->mac_id = port_num;
1112 au_macs[port_num] = aup;
1113
1114 if (port_num == 0) {
1115 if (prom_get_ethernet_addr(ethaddr) == 0)
1116 memcpy(au1000_mac_addr, ethaddr, sizeof(au1000_mac_addr));
1117 else {
1118 printk(KERN_INFO "%s: No MAC address found\n",
1119 dev->name);
1120 /* Use the hard coded MAC addresses */
1121 }
1122
1123 setup_hw_rings(aup, MAC0_RX_DMA_ADDR, MAC0_TX_DMA_ADDR);
1124 } else if (port_num == 1)
1125 setup_hw_rings(aup, MAC1_RX_DMA_ADDR, MAC1_TX_DMA_ADDR);
1126
1127 /*
1128 * Assign to the Ethernet ports two consecutive MAC addresses
1129 * to match those that are printed on their stickers
1130 */
1131 memcpy(dev->dev_addr, au1000_mac_addr, sizeof(au1000_mac_addr));
1132 dev->dev_addr[5] += port_num;
1133
1134 *aup->enable = 0;
1135 aup->mac_enabled = 0;
1136
1137 aup->mii_bus = mdiobus_alloc();
1138 if (aup->mii_bus == NULL)
1139 goto err_out;
1140
1141 aup->mii_bus->priv = dev;
1142 aup->mii_bus->read = au1000_mdiobus_read;
1143 aup->mii_bus->write = au1000_mdiobus_write;
1144 aup->mii_bus->reset = au1000_mdiobus_reset;
1145 aup->mii_bus->name = "au1000_eth_mii";
1146 snprintf(aup->mii_bus->id, MII_BUS_ID_SIZE, "%x", aup->mac_id);
1147 aup->mii_bus->irq = kmalloc(sizeof(int)*PHY_MAX_ADDR, GFP_KERNEL);
1148 for(i = 0; i < PHY_MAX_ADDR; ++i)
1149 aup->mii_bus->irq[i] = PHY_POLL;
1150
1151 /* if known, set corresponding PHY IRQs */
1152#if defined(AU1XXX_PHY_STATIC_CONFIG)
1153# if defined(AU1XXX_PHY0_IRQ)
1154 if (AU1XXX_PHY0_BUSID == aup->mac_id)
1155 aup->mii_bus->irq[AU1XXX_PHY0_ADDR] = AU1XXX_PHY0_IRQ;
1156# endif
1157# if defined(AU1XXX_PHY1_IRQ)
1158 if (AU1XXX_PHY1_BUSID == aup->mac_id)
1159 aup->mii_bus->irq[AU1XXX_PHY1_ADDR] = AU1XXX_PHY1_IRQ;
1160# endif
1161#endif
1162 mdiobus_register(aup->mii_bus);
1163
1164 if (mii_probe(dev) != 0) {
1165 goto err_out;
1166 }
1167
1168 pDBfree = NULL;
1169 /* setup the data buffer descriptors and attach a buffer to each one */
1170 pDB = aup->db;
1171 for (i = 0; i < (NUM_TX_BUFFS+NUM_RX_BUFFS); i++) {
1172 pDB->pnext = pDBfree;
1173 pDBfree = pDB;
1174 pDB->vaddr = (u32 *)((unsigned)aup->vaddr + MAX_BUF_SIZE*i);
1175 pDB->dma_addr = (dma_addr_t)virt_to_bus(pDB->vaddr);
1176 pDB++;
1177 }
1178 aup->pDBfree = pDBfree;
1179
1180 for (i = 0; i < NUM_RX_DMA; i++) {
1181 pDB = GetFreeDB(aup);
1182 if (!pDB) {
1183 goto err_out;
1184 }
1185 aup->rx_dma_ring[i]->buff_stat = (unsigned)pDB->dma_addr;
1186 aup->rx_db_inuse[i] = pDB;
1187 }
1188 for (i = 0; i < NUM_TX_DMA; i++) {
1189 pDB = GetFreeDB(aup);
1190 if (!pDB) {
1191 goto err_out;
1192 }
1193 aup->tx_dma_ring[i]->buff_stat = (unsigned)pDB->dma_addr;
1194 aup->tx_dma_ring[i]->len = 0;
1195 aup->tx_db_inuse[i] = pDB;
1196 }
1197
1198 dev->base_addr = base;
1199 dev->irq = irq;
1200 dev->open = au1000_open;
1201 dev->hard_start_xmit = au1000_tx;
1202 dev->stop = au1000_close;
1203 dev->set_multicast_list = &set_rx_mode;
1204 dev->do_ioctl = &au1000_ioctl;
1205 SET_ETHTOOL_OPS(dev, &au1000_ethtool_ops);
1206 dev->tx_timeout = au1000_tx_timeout;
1207 dev->watchdog_timeo = ETH_TX_TIMEOUT;
1208
1209 /*
1210 * The boot code uses the ethernet controller, so reset it to start
1211 * fresh. au1000_init() expects that the device is in reset state.
1212 */
1213 reset_mac(dev);
1214
1215 return dev;
1216
1217err_out:
1218 if (aup->mii_bus != NULL) {
1219 mdiobus_unregister(aup->mii_bus);
1220 mdiobus_free(aup->mii_bus);
1221 }
1222
1223 /* here we should have a valid dev plus aup-> register addresses
1224 * so we can reset the mac properly.*/
1225 reset_mac(dev);
1226
1227 for (i = 0; i < NUM_RX_DMA; i++) {
1228 if (aup->rx_db_inuse[i])
1229 ReleaseDB(aup, aup->rx_db_inuse[i]);
1230 }
1231 for (i = 0; i < NUM_TX_DMA; i++) {
1232 if (aup->tx_db_inuse[i])
1233 ReleaseDB(aup, aup->tx_db_inuse[i]);
1234 }
1235 dma_free_noncoherent(NULL, MAX_BUF_SIZE * (NUM_TX_BUFFS + NUM_RX_BUFFS),
1236 (void *)aup->vaddr, aup->dma_addr);
1237 unregister_netdev(dev);
1238 free_netdev(dev);
1239 release_mem_region( base, MAC_IOSIZE);
1240 release_mem_region(macen, 4);
1241 return NULL;
1242}
1243
1244/*
1245 * Setup the base address and interrupt of the Au1xxx ethernet macs
1246 * based on cpu type and whether the interface is enabled in sys_pinfunc
1247 * register. The last interface is enabled if SYS_PF_NI2 (bit 4) is 0.
1248 */
1249static int __init au1000_init_module(void)
1250{
1251 int ni = (int)((au_readl(SYS_PINFUNC) & (u32)(SYS_PF_NI2)) >> 4);
1252 struct net_device *dev;
1253 int i, found_one = 0;
1254
1255 num_ifs = NUM_ETH_INTERFACES - ni;
1256
1257 for(i = 0; i < num_ifs; i++) {
1258 dev = au1000_probe(i);
1259 iflist[i].dev = dev;
1260 if (dev)
1261 found_one++;
1262 }
1263 if (!found_one)
1264 return -ENODEV;
1265 return 0;
1266}
1267
1268static void __exit au1000_cleanup_module(void)
1269{
1270 int i, j;
1271 struct net_device *dev;
1272 struct au1000_private *aup;
1273
1274 for (i = 0; i < num_ifs; i++) {
1275 dev = iflist[i].dev;
1276 if (dev) {
1277 aup = netdev_priv(dev);
1278 unregister_netdev(dev);
1279 mdiobus_unregister(aup->mii_bus);
1280 mdiobus_free(aup->mii_bus);
1281 for (j = 0; j < NUM_RX_DMA; j++)
1282 if (aup->rx_db_inuse[j])
1283 ReleaseDB(aup, aup->rx_db_inuse[j]);
1284 for (j = 0; j < NUM_TX_DMA; j++)
1285 if (aup->tx_db_inuse[j])
1286 ReleaseDB(aup, aup->tx_db_inuse[j]);
1287 dma_free_noncoherent(NULL, MAX_BUF_SIZE *
1288 (NUM_TX_BUFFS + NUM_RX_BUFFS),
1289 (void *)aup->vaddr, aup->dma_addr);
1290 release_mem_region(dev->base_addr, MAC_IOSIZE);
1291 release_mem_region(CPHYSADDR(iflist[i].macen_addr), 4);
1292 free_netdev(dev);
1293 }
1294 }
1295}
1296
1318module_init(au1000_init_module); 1297module_init(au1000_init_module);
1319module_exit(au1000_cleanup_module); 1298module_exit(au1000_cleanup_module);
diff --git a/drivers/net/b44.c b/drivers/net/b44.c
index c38512ebcea6..92aaaa1ee9f1 100644
--- a/drivers/net/b44.c
+++ b/drivers/net/b44.c
@@ -874,7 +874,7 @@ static int b44_poll(struct napi_struct *napi, int budget)
874 } 874 }
875 875
876 if (work_done < budget) { 876 if (work_done < budget) {
877 netif_rx_complete(napi); 877 napi_complete(napi);
878 b44_enable_ints(bp); 878 b44_enable_ints(bp);
879 } 879 }
880 880
@@ -906,13 +906,13 @@ static irqreturn_t b44_interrupt(int irq, void *dev_id)
906 goto irq_ack; 906 goto irq_ack;
907 } 907 }
908 908
909 if (netif_rx_schedule_prep(&bp->napi)) { 909 if (napi_schedule_prep(&bp->napi)) {
910 /* NOTE: These writes are posted by the readback of 910 /* NOTE: These writes are posted by the readback of
911 * the ISTAT register below. 911 * the ISTAT register below.
912 */ 912 */
913 bp->istat = istat; 913 bp->istat = istat;
914 __b44_disable_ints(bp); 914 __b44_disable_ints(bp);
915 __netif_rx_schedule(&bp->napi); 915 __napi_schedule(&bp->napi);
916 } else { 916 } else {
917 printk(KERN_ERR PFX "%s: Error, poll already scheduled\n", 917 printk(KERN_ERR PFX "%s: Error, poll already scheduled\n",
918 dev->name); 918 dev->name);
diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c
index d4a3dac21dcf..fe575b9a9b73 100644
--- a/drivers/net/bnx2.c
+++ b/drivers/net/bnx2.c
@@ -1497,6 +1497,8 @@ static int bnx2_fw_sync(struct bnx2 *, u32, int, int);
1497 1497
1498static int 1498static int
1499bnx2_setup_remote_phy(struct bnx2 *bp, u8 port) 1499bnx2_setup_remote_phy(struct bnx2 *bp, u8 port)
1500__releases(&bp->phy_lock)
1501__acquires(&bp->phy_lock)
1500{ 1502{
1501 u32 speed_arg = 0, pause_adv; 1503 u32 speed_arg = 0, pause_adv;
1502 1504
@@ -1554,6 +1556,8 @@ bnx2_setup_remote_phy(struct bnx2 *bp, u8 port)
1554 1556
1555static int 1557static int
1556bnx2_setup_serdes_phy(struct bnx2 *bp, u8 port) 1558bnx2_setup_serdes_phy(struct bnx2 *bp, u8 port)
1559__releases(&bp->phy_lock)
1560__acquires(&bp->phy_lock)
1557{ 1561{
1558 u32 adv, bmcr; 1562 u32 adv, bmcr;
1559 u32 new_adv = 0; 1563 u32 new_adv = 0;
@@ -1866,6 +1870,8 @@ bnx2_set_remote_link(struct bnx2 *bp)
1866 1870
1867static int 1871static int
1868bnx2_setup_copper_phy(struct bnx2 *bp) 1872bnx2_setup_copper_phy(struct bnx2 *bp)
1873__releases(&bp->phy_lock)
1874__acquires(&bp->phy_lock)
1869{ 1875{
1870 u32 bmcr; 1876 u32 bmcr;
1871 u32 new_bmcr; 1877 u32 new_bmcr;
@@ -1963,6 +1969,8 @@ bnx2_setup_copper_phy(struct bnx2 *bp)
1963 1969
1964static int 1970static int
1965bnx2_setup_phy(struct bnx2 *bp, u8 port) 1971bnx2_setup_phy(struct bnx2 *bp, u8 port)
1972__releases(&bp->phy_lock)
1973__acquires(&bp->phy_lock)
1966{ 1974{
1967 if (bp->loopback == MAC_LOOPBACK) 1975 if (bp->loopback == MAC_LOOPBACK)
1968 return 0; 1976 return 0;
@@ -2176,6 +2184,8 @@ bnx2_init_copper_phy(struct bnx2 *bp, int reset_phy)
2176 2184
2177static int 2185static int
2178bnx2_init_phy(struct bnx2 *bp, int reset_phy) 2186bnx2_init_phy(struct bnx2 *bp, int reset_phy)
2187__releases(&bp->phy_lock)
2188__acquires(&bp->phy_lock)
2179{ 2189{
2180 u32 val; 2190 u32 val;
2181 int rc = 0; 2191 int rc = 0;
@@ -3053,7 +3063,7 @@ bnx2_msi(int irq, void *dev_instance)
3053 if (unlikely(atomic_read(&bp->intr_sem) != 0)) 3063 if (unlikely(atomic_read(&bp->intr_sem) != 0))
3054 return IRQ_HANDLED; 3064 return IRQ_HANDLED;
3055 3065
3056 netif_rx_schedule(&bnapi->napi); 3066 napi_schedule(&bnapi->napi);
3057 3067
3058 return IRQ_HANDLED; 3068 return IRQ_HANDLED;
3059} 3069}
@@ -3070,7 +3080,7 @@ bnx2_msi_1shot(int irq, void *dev_instance)
3070 if (unlikely(atomic_read(&bp->intr_sem) != 0)) 3080 if (unlikely(atomic_read(&bp->intr_sem) != 0))
3071 return IRQ_HANDLED; 3081 return IRQ_HANDLED;
3072 3082
3073 netif_rx_schedule(&bnapi->napi); 3083 napi_schedule(&bnapi->napi);
3074 3084
3075 return IRQ_HANDLED; 3085 return IRQ_HANDLED;
3076} 3086}
@@ -3106,9 +3116,9 @@ bnx2_interrupt(int irq, void *dev_instance)
3106 if (unlikely(atomic_read(&bp->intr_sem) != 0)) 3116 if (unlikely(atomic_read(&bp->intr_sem) != 0))
3107 return IRQ_HANDLED; 3117 return IRQ_HANDLED;
3108 3118
3109 if (netif_rx_schedule_prep(&bnapi->napi)) { 3119 if (napi_schedule_prep(&bnapi->napi)) {
3110 bnapi->last_status_idx = sblk->status_idx; 3120 bnapi->last_status_idx = sblk->status_idx;
3111 __netif_rx_schedule(&bnapi->napi); 3121 __napi_schedule(&bnapi->napi);
3112 } 3122 }
3113 3123
3114 return IRQ_HANDLED; 3124 return IRQ_HANDLED;
@@ -3218,7 +3228,7 @@ static int bnx2_poll_msix(struct napi_struct *napi, int budget)
3218 rmb(); 3228 rmb();
3219 if (likely(!bnx2_has_fast_work(bnapi))) { 3229 if (likely(!bnx2_has_fast_work(bnapi))) {
3220 3230
3221 netif_rx_complete(napi); 3231 napi_complete(napi);
3222 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num | 3232 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
3223 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | 3233 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
3224 bnapi->last_status_idx); 3234 bnapi->last_status_idx);
@@ -3251,7 +3261,7 @@ static int bnx2_poll(struct napi_struct *napi, int budget)
3251 3261
3252 rmb(); 3262 rmb();
3253 if (likely(!bnx2_has_work(bnapi))) { 3263 if (likely(!bnx2_has_work(bnapi))) {
3254 netif_rx_complete(napi); 3264 napi_complete(napi);
3255 if (likely(bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)) { 3265 if (likely(bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)) {
3256 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, 3266 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3257 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | 3267 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
diff --git a/drivers/net/bnx2x_main.c b/drivers/net/bnx2x_main.c
index d3e7775a9ccf..71f81c79d638 100644
--- a/drivers/net/bnx2x_main.c
+++ b/drivers/net/bnx2x_main.c
@@ -1654,7 +1654,7 @@ static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
1654 prefetch(&fp->status_blk->c_status_block.status_block_index); 1654 prefetch(&fp->status_blk->c_status_block.status_block_index);
1655 prefetch(&fp->status_blk->u_status_block.status_block_index); 1655 prefetch(&fp->status_blk->u_status_block.status_block_index);
1656 1656
1657 netif_rx_schedule(&bnx2x_fp(bp, index, napi)); 1657 napi_schedule(&bnx2x_fp(bp, index, napi));
1658 1658
1659 return IRQ_HANDLED; 1659 return IRQ_HANDLED;
1660} 1660}
@@ -1693,7 +1693,7 @@ static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
1693 prefetch(&fp->status_blk->c_status_block.status_block_index); 1693 prefetch(&fp->status_blk->c_status_block.status_block_index);
1694 prefetch(&fp->status_blk->u_status_block.status_block_index); 1694 prefetch(&fp->status_blk->u_status_block.status_block_index);
1695 1695
1696 netif_rx_schedule(&bnx2x_fp(bp, 0, napi)); 1696 napi_schedule(&bnx2x_fp(bp, 0, napi));
1697 1697
1698 status &= ~mask; 1698 status &= ~mask;
1699 } 1699 }
@@ -9374,7 +9374,7 @@ static int bnx2x_poll(struct napi_struct *napi, int budget)
9374#ifdef BNX2X_STOP_ON_ERROR 9374#ifdef BNX2X_STOP_ON_ERROR
9375poll_panic: 9375poll_panic:
9376#endif 9376#endif
9377 netif_rx_complete(napi); 9377 napi_complete(napi);
9378 9378
9379 bnx2x_ack_sb(bp, FP_SB_ID(fp), USTORM_ID, 9379 bnx2x_ack_sb(bp, FP_SB_ID(fp), USTORM_ID,
9380 le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1); 9380 le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1);
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 9fb388388fb7..21bce2c0fde2 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -3369,7 +3369,7 @@ static int bond_info_seq_show(struct seq_file *seq, void *v)
3369 return 0; 3369 return 0;
3370} 3370}
3371 3371
3372static struct seq_operations bond_info_seq_ops = { 3372static const struct seq_operations bond_info_seq_ops = {
3373 .start = bond_info_seq_start, 3373 .start = bond_info_seq_start,
3374 .next = bond_info_seq_next, 3374 .next = bond_info_seq_next,
3375 .stop = bond_info_seq_stop, 3375 .stop = bond_info_seq_stop,
diff --git a/drivers/net/cassini.c b/drivers/net/cassini.c
index 840b3d1a22f5..bb46be275339 100644
--- a/drivers/net/cassini.c
+++ b/drivers/net/cassini.c
@@ -2506,7 +2506,7 @@ static irqreturn_t cas_interruptN(int irq, void *dev_id)
2506 if (status & INTR_RX_DONE_ALT) { /* handle rx separately */ 2506 if (status & INTR_RX_DONE_ALT) { /* handle rx separately */
2507#ifdef USE_NAPI 2507#ifdef USE_NAPI
2508 cas_mask_intr(cp); 2508 cas_mask_intr(cp);
2509 netif_rx_schedule(&cp->napi); 2509 napi_schedule(&cp->napi);
2510#else 2510#else
2511 cas_rx_ringN(cp, ring, 0); 2511 cas_rx_ringN(cp, ring, 0);
2512#endif 2512#endif
@@ -2557,7 +2557,7 @@ static irqreturn_t cas_interrupt1(int irq, void *dev_id)
2557 if (status & INTR_RX_DONE_ALT) { /* handle rx separately */ 2557 if (status & INTR_RX_DONE_ALT) { /* handle rx separately */
2558#ifdef USE_NAPI 2558#ifdef USE_NAPI
2559 cas_mask_intr(cp); 2559 cas_mask_intr(cp);
2560 netif_rx_schedule(&cp->napi); 2560 napi_schedule(&cp->napi);
2561#else 2561#else
2562 cas_rx_ringN(cp, 1, 0); 2562 cas_rx_ringN(cp, 1, 0);
2563#endif 2563#endif
@@ -2613,7 +2613,7 @@ static irqreturn_t cas_interrupt(int irq, void *dev_id)
2613 if (status & INTR_RX_DONE) { 2613 if (status & INTR_RX_DONE) {
2614#ifdef USE_NAPI 2614#ifdef USE_NAPI
2615 cas_mask_intr(cp); 2615 cas_mask_intr(cp);
2616 netif_rx_schedule(&cp->napi); 2616 napi_schedule(&cp->napi);
2617#else 2617#else
2618 cas_rx_ringN(cp, 0, 0); 2618 cas_rx_ringN(cp, 0, 0);
2619#endif 2619#endif
@@ -2691,7 +2691,7 @@ rx_comp:
2691#endif 2691#endif
2692 spin_unlock_irqrestore(&cp->lock, flags); 2692 spin_unlock_irqrestore(&cp->lock, flags);
2693 if (enable_intr) { 2693 if (enable_intr) {
2694 netif_rx_complete(napi); 2694 napi_complete(napi);
2695 cas_unmask_intr(cp); 2695 cas_unmask_intr(cp);
2696 } 2696 }
2697 return credits; 2697 return credits;
diff --git a/drivers/net/chelsio/sge.c b/drivers/net/chelsio/sge.c
index d984b7995763..840da83fb3cf 100644
--- a/drivers/net/chelsio/sge.c
+++ b/drivers/net/chelsio/sge.c
@@ -1612,7 +1612,7 @@ int t1_poll(struct napi_struct *napi, int budget)
1612 int work_done = process_responses(adapter, budget); 1612 int work_done = process_responses(adapter, budget);
1613 1613
1614 if (likely(work_done < budget)) { 1614 if (likely(work_done < budget)) {
1615 netif_rx_complete(napi); 1615 napi_complete(napi);
1616 writel(adapter->sge->respQ.cidx, 1616 writel(adapter->sge->respQ.cidx,
1617 adapter->regs + A_SG_SLEEPING); 1617 adapter->regs + A_SG_SLEEPING);
1618 } 1618 }
@@ -1630,7 +1630,7 @@ irqreturn_t t1_interrupt(int irq, void *data)
1630 1630
1631 if (napi_schedule_prep(&adapter->napi)) { 1631 if (napi_schedule_prep(&adapter->napi)) {
1632 if (process_pure_responses(adapter)) 1632 if (process_pure_responses(adapter))
1633 __netif_rx_schedule(&adapter->napi); 1633 __napi_schedule(&adapter->napi);
1634 else { 1634 else {
1635 /* no data, no NAPI needed */ 1635 /* no data, no NAPI needed */
1636 writel(sge->respQ.cidx, adapter->regs + A_SG_SLEEPING); 1636 writel(sge->respQ.cidx, adapter->regs + A_SG_SLEEPING);
diff --git a/drivers/net/cpmac.c b/drivers/net/cpmac.c
index f66548751c38..4dad04e91f6d 100644
--- a/drivers/net/cpmac.c
+++ b/drivers/net/cpmac.c
@@ -428,7 +428,7 @@ static int cpmac_poll(struct napi_struct *napi, int budget)
428 printk(KERN_WARNING "%s: rx: polling, but no queue\n", 428 printk(KERN_WARNING "%s: rx: polling, but no queue\n",
429 priv->dev->name); 429 priv->dev->name);
430 spin_unlock(&priv->rx_lock); 430 spin_unlock(&priv->rx_lock);
431 netif_rx_complete(napi); 431 napi_complete(napi);
432 return 0; 432 return 0;
433 } 433 }
434 434
@@ -514,7 +514,7 @@ static int cpmac_poll(struct napi_struct *napi, int budget)
514 if (processed == 0) { 514 if (processed == 0) {
515 /* we ran out of packets to read, 515 /* we ran out of packets to read,
516 * revert to interrupt-driven mode */ 516 * revert to interrupt-driven mode */
517 netif_rx_complete(napi); 517 napi_complete(napi);
518 cpmac_write(priv->regs, CPMAC_RX_INT_ENABLE, 1); 518 cpmac_write(priv->regs, CPMAC_RX_INT_ENABLE, 1);
519 return 0; 519 return 0;
520 } 520 }
@@ -536,7 +536,7 @@ fatal_error:
536 } 536 }
537 537
538 spin_unlock(&priv->rx_lock); 538 spin_unlock(&priv->rx_lock);
539 netif_rx_complete(napi); 539 napi_complete(napi);
540 netif_tx_stop_all_queues(priv->dev); 540 netif_tx_stop_all_queues(priv->dev);
541 napi_disable(&priv->napi); 541 napi_disable(&priv->napi);
542 542
@@ -802,9 +802,9 @@ static irqreturn_t cpmac_irq(int irq, void *dev_id)
802 802
803 if (status & MAC_INT_RX) { 803 if (status & MAC_INT_RX) {
804 queue = (status >> 8) & 7; 804 queue = (status >> 8) & 7;
805 if (netif_rx_schedule_prep(&priv->napi)) { 805 if (napi_schedule_prep(&priv->napi)) {
806 cpmac_write(priv->regs, CPMAC_RX_INT_CLEAR, 1 << queue); 806 cpmac_write(priv->regs, CPMAC_RX_INT_CLEAR, 1 << queue);
807 __netif_rx_schedule(&priv->napi); 807 __napi_schedule(&priv->napi);
808 } 808 }
809 } 809 }
810 810
diff --git a/drivers/net/cxgb3/adapter.h b/drivers/net/cxgb3/adapter.h
index a89d8cc51205..fbe15699584e 100644
--- a/drivers/net/cxgb3/adapter.h
+++ b/drivers/net/cxgb3/adapter.h
@@ -42,7 +42,6 @@
42#include <linux/cache.h> 42#include <linux/cache.h>
43#include <linux/mutex.h> 43#include <linux/mutex.h>
44#include <linux/bitops.h> 44#include <linux/bitops.h>
45#include <linux/inet_lro.h>
46#include "t3cdev.h" 45#include "t3cdev.h"
47#include <asm/io.h> 46#include <asm/io.h>
48 47
@@ -178,15 +177,11 @@ enum { /* per port SGE statistics */
178 SGE_PSTAT_TX_CSUM, /* # of TX checksum offloads */ 177 SGE_PSTAT_TX_CSUM, /* # of TX checksum offloads */
179 SGE_PSTAT_VLANEX, /* # of VLAN tag extractions */ 178 SGE_PSTAT_VLANEX, /* # of VLAN tag extractions */
180 SGE_PSTAT_VLANINS, /* # of VLAN tag insertions */ 179 SGE_PSTAT_VLANINS, /* # of VLAN tag insertions */
181 SGE_PSTAT_LRO_AGGR, /* # of page chunks added to LRO sessions */
182 SGE_PSTAT_LRO_FLUSHED, /* # of flushed LRO sessions */
183 SGE_PSTAT_LRO_NO_DESC, /* # of overflown LRO sessions */
184 180
185 SGE_PSTAT_MAX /* must be last */ 181 SGE_PSTAT_MAX /* must be last */
186}; 182};
187 183
188#define T3_MAX_LRO_SES 8 184struct napi_gro_fraginfo;
189#define T3_MAX_LRO_MAX_PKTS 64
190 185
191struct sge_qset { /* an SGE queue set */ 186struct sge_qset { /* an SGE queue set */
192 struct adapter *adap; 187 struct adapter *adap;
@@ -194,12 +189,8 @@ struct sge_qset { /* an SGE queue set */
194 struct sge_rspq rspq; 189 struct sge_rspq rspq;
195 struct sge_fl fl[SGE_RXQ_PER_SET]; 190 struct sge_fl fl[SGE_RXQ_PER_SET];
196 struct sge_txq txq[SGE_TXQ_PER_SET]; 191 struct sge_txq txq[SGE_TXQ_PER_SET];
197 struct net_lro_mgr lro_mgr; 192 struct napi_gro_fraginfo lro_frag_tbl;
198 struct net_lro_desc lro_desc[T3_MAX_LRO_SES];
199 struct skb_frag_struct *lro_frag_tbl;
200 int lro_nfrags;
201 int lro_enabled; 193 int lro_enabled;
202 int lro_frag_len;
203 void *lro_va; 194 void *lro_va;
204 struct net_device *netdev; 195 struct net_device *netdev;
205 struct netdev_queue *tx_q; /* associated netdev TX queue */ 196 struct netdev_queue *tx_q; /* associated netdev TX queue */
@@ -230,6 +221,7 @@ struct adapter {
230 unsigned int slow_intr_mask; 221 unsigned int slow_intr_mask;
231 unsigned long irq_stats[IRQ_NUM_STATS]; 222 unsigned long irq_stats[IRQ_NUM_STATS];
232 223
224 int msix_nvectors;
233 struct { 225 struct {
234 unsigned short vec; 226 unsigned short vec;
235 char desc[22]; 227 char desc[22];
diff --git a/drivers/net/cxgb3/cxgb3_main.c b/drivers/net/cxgb3/cxgb3_main.c
index 0089746b8d02..7381f378b4e6 100644
--- a/drivers/net/cxgb3/cxgb3_main.c
+++ b/drivers/net/cxgb3/cxgb3_main.c
@@ -338,7 +338,7 @@ static void free_irq_resources(struct adapter *adapter)
338 338
339 free_irq(adapter->msix_info[0].vec, adapter); 339 free_irq(adapter->msix_info[0].vec, adapter);
340 for_each_port(adapter, i) 340 for_each_port(adapter, i)
341 n += adap2pinfo(adapter, i)->nqsets; 341 n += adap2pinfo(adapter, i)->nqsets;
342 342
343 for (i = 0; i < n; ++i) 343 for (i = 0; i < n; ++i)
344 free_irq(adapter->msix_info[i + 1].vec, 344 free_irq(adapter->msix_info[i + 1].vec,
@@ -508,19 +508,9 @@ static void set_qset_lro(struct net_device *dev, int qset_idx, int val)
508{ 508{
509 struct port_info *pi = netdev_priv(dev); 509 struct port_info *pi = netdev_priv(dev);
510 struct adapter *adapter = pi->adapter; 510 struct adapter *adapter = pi->adapter;
511 int i, lro_on = 1;
512 511
513 adapter->params.sge.qset[qset_idx].lro = !!val; 512 adapter->params.sge.qset[qset_idx].lro = !!val;
514 adapter->sge.qs[qset_idx].lro_enabled = !!val; 513 adapter->sge.qs[qset_idx].lro_enabled = !!val;
515
516 /* let ethtool report LRO on only if all queues are LRO enabled */
517 for (i = pi->first_qset; i < pi->first_qset + pi->nqsets; ++i)
518 lro_on &= adapter->params.sge.qset[i].lro;
519
520 if (lro_on)
521 dev->features |= NETIF_F_LRO;
522 else
523 dev->features &= ~NETIF_F_LRO;
524} 514}
525 515
526/** 516/**
@@ -1433,9 +1423,9 @@ static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
1433 *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_VLANINS); 1423 *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_VLANINS);
1434 *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TX_CSUM); 1424 *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TX_CSUM);
1435 *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_RX_CSUM_GOOD); 1425 *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_RX_CSUM_GOOD);
1436 *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_LRO_AGGR); 1426 *data++ = 0;
1437 *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_LRO_FLUSHED); 1427 *data++ = 0;
1438 *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_LRO_NO_DESC); 1428 *data++ = 0;
1439 *data++ = s->rx_cong_drops; 1429 *data++ = s->rx_cong_drops;
1440 1430
1441 *data++ = s->num_toggled; 1431 *data++ = s->num_toggled;
@@ -1826,28 +1816,6 @@ static void get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
1826 memset(&wol->sopass, 0, sizeof(wol->sopass)); 1816 memset(&wol->sopass, 0, sizeof(wol->sopass));
1827} 1817}
1828 1818
1829static int cxgb3_set_flags(struct net_device *dev, u32 data)
1830{
1831 struct port_info *pi = netdev_priv(dev);
1832 int i;
1833
1834 if (data & ETH_FLAG_LRO) {
1835 if (!(pi->rx_offload & T3_RX_CSUM))
1836 return -EINVAL;
1837
1838 pi->rx_offload |= T3_LRO;
1839 for (i = pi->first_qset; i < pi->first_qset + pi->nqsets; i++)
1840 set_qset_lro(dev, i, 1);
1841
1842 } else {
1843 pi->rx_offload &= ~T3_LRO;
1844 for (i = pi->first_qset; i < pi->first_qset + pi->nqsets; i++)
1845 set_qset_lro(dev, i, 0);
1846 }
1847
1848 return 0;
1849}
1850
1851static const struct ethtool_ops cxgb_ethtool_ops = { 1819static const struct ethtool_ops cxgb_ethtool_ops = {
1852 .get_settings = get_settings, 1820 .get_settings = get_settings,
1853 .set_settings = set_settings, 1821 .set_settings = set_settings,
@@ -1877,8 +1845,6 @@ static const struct ethtool_ops cxgb_ethtool_ops = {
1877 .get_regs = get_regs, 1845 .get_regs = get_regs,
1878 .get_wol = get_wol, 1846 .get_wol = get_wol,
1879 .set_tso = ethtool_op_set_tso, 1847 .set_tso = ethtool_op_set_tso,
1880 .get_flags = ethtool_op_get_flags,
1881 .set_flags = cxgb3_set_flags,
1882}; 1848};
1883 1849
1884static int in_range(int val, int lo, int hi) 1850static int in_range(int val, int lo, int hi)
@@ -2752,7 +2718,7 @@ static void set_nqsets(struct adapter *adap)
2752 int i, j = 0; 2718 int i, j = 0;
2753 int num_cpus = num_online_cpus(); 2719 int num_cpus = num_online_cpus();
2754 int hwports = adap->params.nports; 2720 int hwports = adap->params.nports;
2755 int nqsets = SGE_QSETS; 2721 int nqsets = adap->msix_nvectors - 1;
2756 2722
2757 if (adap->params.rev > 0 && adap->flags & USING_MSIX) { 2723 if (adap->params.rev > 0 && adap->flags & USING_MSIX) {
2758 if (hwports == 2 && 2724 if (hwports == 2 &&
@@ -2781,18 +2747,25 @@ static void set_nqsets(struct adapter *adap)
2781static int __devinit cxgb_enable_msix(struct adapter *adap) 2747static int __devinit cxgb_enable_msix(struct adapter *adap)
2782{ 2748{
2783 struct msix_entry entries[SGE_QSETS + 1]; 2749 struct msix_entry entries[SGE_QSETS + 1];
2750 int vectors;
2784 int i, err; 2751 int i, err;
2785 2752
2786 for (i = 0; i < ARRAY_SIZE(entries); ++i) 2753 vectors = ARRAY_SIZE(entries);
2754 for (i = 0; i < vectors; ++i)
2787 entries[i].entry = i; 2755 entries[i].entry = i;
2788 2756
2789 err = pci_enable_msix(adap->pdev, entries, ARRAY_SIZE(entries)); 2757 while ((err = pci_enable_msix(adap->pdev, entries, vectors)) > 0)
2758 vectors = err;
2759
2760 if (!err && vectors < (adap->params.nports + 1))
2761 err = -1;
2762
2790 if (!err) { 2763 if (!err) {
2791 for (i = 0; i < ARRAY_SIZE(entries); ++i) 2764 for (i = 0; i < vectors; ++i)
2792 adap->msix_info[i].vec = entries[i].vector; 2765 adap->msix_info[i].vec = entries[i].vector;
2793 } else if (err > 0) 2766 adap->msix_nvectors = vectors;
2794 dev_info(&adap->pdev->dev, 2767 }
2795 "only %d MSI-X vectors left, not using MSI-X\n", err); 2768
2796 return err; 2769 return err;
2797} 2770}
2798 2771
@@ -2960,7 +2933,7 @@ static int __devinit init_one(struct pci_dev *pdev,
2960 netdev->mem_end = mmio_start + mmio_len - 1; 2933 netdev->mem_end = mmio_start + mmio_len - 1;
2961 netdev->features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO; 2934 netdev->features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO;
2962 netdev->features |= NETIF_F_LLTX; 2935 netdev->features |= NETIF_F_LLTX;
2963 netdev->features |= NETIF_F_LRO; 2936 netdev->features |= NETIF_F_GRO;
2964 if (pci_using_dac) 2937 if (pci_using_dac)
2965 netdev->features |= NETIF_F_HIGHDMA; 2938 netdev->features |= NETIF_F_HIGHDMA;
2966 2939
diff --git a/drivers/net/cxgb3/sge.c b/drivers/net/cxgb3/sge.c
index 379a1324db4e..8299fb538f25 100644
--- a/drivers/net/cxgb3/sge.c
+++ b/drivers/net/cxgb3/sge.c
@@ -585,8 +585,7 @@ static void t3_reset_qset(struct sge_qset *q)
585 memset(q->txq, 0, sizeof(struct sge_txq) * SGE_TXQ_PER_SET); 585 memset(q->txq, 0, sizeof(struct sge_txq) * SGE_TXQ_PER_SET);
586 q->txq_stopped = 0; 586 q->txq_stopped = 0;
587 q->tx_reclaim_timer.function = NULL; /* for t3_stop_sge_timers() */ 587 q->tx_reclaim_timer.function = NULL; /* for t3_stop_sge_timers() */
588 kfree(q->lro_frag_tbl); 588 q->lro_frag_tbl.nr_frags = q->lro_frag_tbl.len = 0;
589 q->lro_nfrags = q->lro_frag_len = 0;
590} 589}
591 590
592 591
@@ -1945,10 +1944,8 @@ static void rx_eth(struct adapter *adap, struct sge_rspq *rq,
1945 qs->port_stats[SGE_PSTAT_VLANEX]++; 1944 qs->port_stats[SGE_PSTAT_VLANEX]++;
1946 if (likely(grp)) 1945 if (likely(grp))
1947 if (lro) 1946 if (lro)
1948 lro_vlan_hwaccel_receive_skb(&qs->lro_mgr, skb, 1947 vlan_gro_receive(&qs->napi, grp,
1949 grp, 1948 ntohs(p->vlan), skb);
1950 ntohs(p->vlan),
1951 p);
1952 else { 1949 else {
1953 if (unlikely(pi->iscsi_ipv4addr && 1950 if (unlikely(pi->iscsi_ipv4addr &&
1954 is_arp(skb))) { 1951 is_arp(skb))) {
@@ -1965,7 +1962,7 @@ static void rx_eth(struct adapter *adap, struct sge_rspq *rq,
1965 dev_kfree_skb_any(skb); 1962 dev_kfree_skb_any(skb);
1966 } else if (rq->polling) { 1963 } else if (rq->polling) {
1967 if (lro) 1964 if (lro)
1968 lro_receive_skb(&qs->lro_mgr, skb, p); 1965 napi_gro_receive(&qs->napi, skb);
1969 else { 1966 else {
1970 if (unlikely(pi->iscsi_ipv4addr && is_arp(skb))) 1967 if (unlikely(pi->iscsi_ipv4addr && is_arp(skb)))
1971 cxgb3_arp_process(adap, skb); 1968 cxgb3_arp_process(adap, skb);
@@ -1981,59 +1978,6 @@ static inline int is_eth_tcp(u32 rss)
1981} 1978}
1982 1979
1983/** 1980/**
1984 * lro_frame_ok - check if an ingress packet is eligible for LRO
1985 * @p: the CPL header of the packet
1986 *
1987 * Returns true if a received packet is eligible for LRO.
1988 * The following conditions must be true:
1989 * - packet is TCP/IP Ethernet II (checked elsewhere)
1990 * - not an IP fragment
1991 * - no IP options
1992 * - TCP/IP checksums are correct
1993 * - the packet is for this host
1994 */
1995static inline int lro_frame_ok(const struct cpl_rx_pkt *p)
1996{
1997 const struct ethhdr *eh = (struct ethhdr *)(p + 1);
1998 const struct iphdr *ih = (struct iphdr *)(eh + 1);
1999
2000 return (*((u8 *)p + 1) & 0x90) == 0x10 && p->csum == htons(0xffff) &&
2001 eh->h_proto == htons(ETH_P_IP) && ih->ihl == (sizeof(*ih) >> 2);
2002}
2003
2004static int t3_get_lro_header(void **eh, void **iph, void **tcph,
2005 u64 *hdr_flags, void *priv)
2006{
2007 const struct cpl_rx_pkt *cpl = priv;
2008
2009 if (!lro_frame_ok(cpl))
2010 return -1;
2011
2012 *eh = (struct ethhdr *)(cpl + 1);
2013 *iph = (struct iphdr *)((struct ethhdr *)*eh + 1);
2014 *tcph = (struct tcphdr *)((struct iphdr *)*iph + 1);
2015
2016 *hdr_flags = LRO_IPV4 | LRO_TCP;
2017 return 0;
2018}
2019
2020static int t3_get_skb_header(struct sk_buff *skb,
2021 void **iph, void **tcph, u64 *hdr_flags,
2022 void *priv)
2023{
2024 void *eh;
2025
2026 return t3_get_lro_header(&eh, iph, tcph, hdr_flags, priv);
2027}
2028
2029static int t3_get_frag_header(struct skb_frag_struct *frag, void **eh,
2030 void **iph, void **tcph, u64 *hdr_flags,
2031 void *priv)
2032{
2033 return t3_get_lro_header(eh, iph, tcph, hdr_flags, priv);
2034}
2035
2036/**
2037 * lro_add_page - add a page chunk to an LRO session 1981 * lro_add_page - add a page chunk to an LRO session
2038 * @adap: the adapter 1982 * @adap: the adapter
2039 * @qs: the associated queue set 1983 * @qs: the associated queue set
@@ -2049,8 +1993,9 @@ static void lro_add_page(struct adapter *adap, struct sge_qset *qs,
2049{ 1993{
2050 struct rx_sw_desc *sd = &fl->sdesc[fl->cidx]; 1994 struct rx_sw_desc *sd = &fl->sdesc[fl->cidx];
2051 struct cpl_rx_pkt *cpl; 1995 struct cpl_rx_pkt *cpl;
2052 struct skb_frag_struct *rx_frag = qs->lro_frag_tbl; 1996 struct skb_frag_struct *rx_frag = qs->lro_frag_tbl.frags;
2053 int nr_frags = qs->lro_nfrags, frag_len = qs->lro_frag_len; 1997 int nr_frags = qs->lro_frag_tbl.nr_frags;
1998 int frag_len = qs->lro_frag_tbl.len;
2054 int offset = 0; 1999 int offset = 0;
2055 2000
2056 if (!nr_frags) { 2001 if (!nr_frags) {
@@ -2069,13 +2014,13 @@ static void lro_add_page(struct adapter *adap, struct sge_qset *qs,
2069 rx_frag->page_offset = sd->pg_chunk.offset + offset; 2014 rx_frag->page_offset = sd->pg_chunk.offset + offset;
2070 rx_frag->size = len; 2015 rx_frag->size = len;
2071 frag_len += len; 2016 frag_len += len;
2072 qs->lro_nfrags++; 2017 qs->lro_frag_tbl.nr_frags++;
2073 qs->lro_frag_len = frag_len; 2018 qs->lro_frag_tbl.len = frag_len;
2074 2019
2075 if (!complete) 2020 if (!complete)
2076 return; 2021 return;
2077 2022
2078 qs->lro_nfrags = qs->lro_frag_len = 0; 2023 qs->lro_frag_tbl.ip_summed = CHECKSUM_UNNECESSARY;
2079 cpl = qs->lro_va; 2024 cpl = qs->lro_va;
2080 2025
2081 if (unlikely(cpl->vlan_valid)) { 2026 if (unlikely(cpl->vlan_valid)) {
@@ -2084,36 +2029,15 @@ static void lro_add_page(struct adapter *adap, struct sge_qset *qs,
2084 struct vlan_group *grp = pi->vlan_grp; 2029 struct vlan_group *grp = pi->vlan_grp;
2085 2030
2086 if (likely(grp != NULL)) { 2031 if (likely(grp != NULL)) {
2087 lro_vlan_hwaccel_receive_frags(&qs->lro_mgr, 2032 vlan_gro_frags(&qs->napi, grp, ntohs(cpl->vlan),
2088 qs->lro_frag_tbl, 2033 &qs->lro_frag_tbl);
2089 frag_len, frag_len, 2034 goto out;
2090 grp, ntohs(cpl->vlan),
2091 cpl, 0);
2092 return;
2093 } 2035 }
2094 } 2036 }
2095 lro_receive_frags(&qs->lro_mgr, qs->lro_frag_tbl, 2037 napi_gro_frags(&qs->napi, &qs->lro_frag_tbl);
2096 frag_len, frag_len, cpl, 0);
2097}
2098 2038
2099/** 2039out:
2100 * init_lro_mgr - initialize a LRO manager object 2040 qs->lro_frag_tbl.nr_frags = qs->lro_frag_tbl.len = 0;
2101 * @lro_mgr: the LRO manager object
2102 */
2103static void init_lro_mgr(struct sge_qset *qs, struct net_lro_mgr *lro_mgr)
2104{
2105 lro_mgr->dev = qs->netdev;
2106 lro_mgr->features = LRO_F_NAPI;
2107 lro_mgr->frag_align_pad = NET_IP_ALIGN;
2108 lro_mgr->ip_summed = CHECKSUM_UNNECESSARY;
2109 lro_mgr->ip_summed_aggr = CHECKSUM_UNNECESSARY;
2110 lro_mgr->max_desc = T3_MAX_LRO_SES;
2111 lro_mgr->lro_arr = qs->lro_desc;
2112 lro_mgr->get_frag_header = t3_get_frag_header;
2113 lro_mgr->get_skb_header = t3_get_skb_header;
2114 lro_mgr->max_aggr = T3_MAX_LRO_MAX_PKTS;
2115 if (lro_mgr->max_aggr > MAX_SKB_FRAGS)
2116 lro_mgr->max_aggr = MAX_SKB_FRAGS;
2117} 2041}
2118 2042
2119/** 2043/**
@@ -2357,10 +2281,6 @@ next_fl:
2357 } 2281 }
2358 2282
2359 deliver_partial_bundle(&adap->tdev, q, offload_skbs, ngathered); 2283 deliver_partial_bundle(&adap->tdev, q, offload_skbs, ngathered);
2360 lro_flush_all(&qs->lro_mgr);
2361 qs->port_stats[SGE_PSTAT_LRO_AGGR] = qs->lro_mgr.stats.aggregated;
2362 qs->port_stats[SGE_PSTAT_LRO_FLUSHED] = qs->lro_mgr.stats.flushed;
2363 qs->port_stats[SGE_PSTAT_LRO_NO_DESC] = qs->lro_mgr.stats.no_desc;
2364 2284
2365 if (sleeping) 2285 if (sleeping)
2366 check_ring_db(adap, qs, sleeping); 2286 check_ring_db(adap, qs, sleeping);
@@ -2907,7 +2827,6 @@ int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports,
2907{ 2827{
2908 int i, avail, ret = -ENOMEM; 2828 int i, avail, ret = -ENOMEM;
2909 struct sge_qset *q = &adapter->sge.qs[id]; 2829 struct sge_qset *q = &adapter->sge.qs[id];
2910 struct net_lro_mgr *lro_mgr = &q->lro_mgr;
2911 2830
2912 init_qset_cntxt(q, id); 2831 init_qset_cntxt(q, id);
2913 setup_timer(&q->tx_reclaim_timer, sge_timer_cb, (unsigned long)q); 2832 setup_timer(&q->tx_reclaim_timer, sge_timer_cb, (unsigned long)q);
@@ -2987,10 +2906,6 @@ int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports,
2987 q->fl[0].order = FL0_PG_ORDER; 2906 q->fl[0].order = FL0_PG_ORDER;
2988 q->fl[1].order = FL1_PG_ORDER; 2907 q->fl[1].order = FL1_PG_ORDER;
2989 2908
2990 q->lro_frag_tbl = kcalloc(MAX_FRAME_SIZE / FL1_PG_CHUNK_SIZE + 1,
2991 sizeof(struct skb_frag_struct),
2992 GFP_KERNEL);
2993 q->lro_nfrags = q->lro_frag_len = 0;
2994 spin_lock_irq(&adapter->sge.reg_lock); 2909 spin_lock_irq(&adapter->sge.reg_lock);
2995 2910
2996 /* FL threshold comparison uses < */ 2911 /* FL threshold comparison uses < */
@@ -3042,8 +2957,6 @@ int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports,
3042 q->tx_q = netdevq; 2957 q->tx_q = netdevq;
3043 t3_update_qset_coalesce(q, p); 2958 t3_update_qset_coalesce(q, p);
3044 2959
3045 init_lro_mgr(q, lro_mgr);
3046
3047 avail = refill_fl(adapter, &q->fl[0], q->fl[0].size, 2960 avail = refill_fl(adapter, &q->fl[0], q->fl[0].size,
3048 GFP_KERNEL | __GFP_COMP); 2961 GFP_KERNEL | __GFP_COMP);
3049 if (!avail) { 2962 if (!avail) {
diff --git a/drivers/net/e100.c b/drivers/net/e100.c
index 86bb876fb123..861d2eeaa43c 100644
--- a/drivers/net/e100.c
+++ b/drivers/net/e100.c
@@ -1944,9 +1944,9 @@ static irqreturn_t e100_intr(int irq, void *dev_id)
1944 if (stat_ack & stat_ack_rnr) 1944 if (stat_ack & stat_ack_rnr)
1945 nic->ru_running = RU_SUSPENDED; 1945 nic->ru_running = RU_SUSPENDED;
1946 1946
1947 if (likely(netif_rx_schedule_prep(&nic->napi))) { 1947 if (likely(napi_schedule_prep(&nic->napi))) {
1948 e100_disable_irq(nic); 1948 e100_disable_irq(nic);
1949 __netif_rx_schedule(&nic->napi); 1949 __napi_schedule(&nic->napi);
1950 } 1950 }
1951 1951
1952 return IRQ_HANDLED; 1952 return IRQ_HANDLED;
@@ -1962,7 +1962,7 @@ static int e100_poll(struct napi_struct *napi, int budget)
1962 1962
1963 /* If budget not fully consumed, exit the polling mode */ 1963 /* If budget not fully consumed, exit the polling mode */
1964 if (work_done < budget) { 1964 if (work_done < budget) {
1965 netif_rx_complete(napi); 1965 napi_complete(napi);
1966 e100_enable_irq(nic); 1966 e100_enable_irq(nic);
1967 } 1967 }
1968 1968
diff --git a/drivers/net/e1000/e1000.h b/drivers/net/e1000/e1000.h
index f5581de04757..e9a416f40162 100644
--- a/drivers/net/e1000/e1000.h
+++ b/drivers/net/e1000/e1000.h
@@ -182,7 +182,6 @@ struct e1000_tx_ring {
182 /* array of buffer information structs */ 182 /* array of buffer information structs */
183 struct e1000_buffer *buffer_info; 183 struct e1000_buffer *buffer_info;
184 184
185 spinlock_t tx_lock;
186 u16 tdh; 185 u16 tdh;
187 u16 tdt; 186 u16 tdt;
188 bool last_tx_tso; 187 bool last_tx_tso;
@@ -238,7 +237,6 @@ struct e1000_adapter {
238 u16 link_speed; 237 u16 link_speed;
239 u16 link_duplex; 238 u16 link_duplex;
240 spinlock_t stats_lock; 239 spinlock_t stats_lock;
241 spinlock_t tx_queue_lock;
242 unsigned int total_tx_bytes; 240 unsigned int total_tx_bytes;
243 unsigned int total_tx_packets; 241 unsigned int total_tx_packets;
244 unsigned int total_rx_bytes; 242 unsigned int total_rx_bytes;
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
index 26474c92193f..7ec1a0c5a0cf 100644
--- a/drivers/net/e1000/e1000_main.c
+++ b/drivers/net/e1000/e1000_main.c
@@ -31,7 +31,7 @@
31 31
32char e1000_driver_name[] = "e1000"; 32char e1000_driver_name[] = "e1000";
33static char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver"; 33static char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver";
34#define DRV_VERSION "7.3.20-k3-NAPI" 34#define DRV_VERSION "7.3.21-k2-NAPI"
35const char e1000_driver_version[] = DRV_VERSION; 35const char e1000_driver_version[] = DRV_VERSION;
36static const char e1000_copyright[] = "Copyright (c) 1999-2006 Intel Corporation."; 36static const char e1000_copyright[] = "Copyright (c) 1999-2006 Intel Corporation.";
37 37
@@ -1048,8 +1048,6 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
1048 if (pci_using_dac) 1048 if (pci_using_dac)
1049 netdev->features |= NETIF_F_HIGHDMA; 1049 netdev->features |= NETIF_F_HIGHDMA;
1050 1050
1051 netdev->features |= NETIF_F_LLTX;
1052
1053 netdev->vlan_features |= NETIF_F_TSO; 1051 netdev->vlan_features |= NETIF_F_TSO;
1054 netdev->vlan_features |= NETIF_F_TSO6; 1052 netdev->vlan_features |= NETIF_F_TSO6;
1055 netdev->vlan_features |= NETIF_F_HW_CSUM; 1053 netdev->vlan_features |= NETIF_F_HW_CSUM;
@@ -1368,8 +1366,6 @@ static int __devinit e1000_sw_init(struct e1000_adapter *adapter)
1368 return -ENOMEM; 1366 return -ENOMEM;
1369 } 1367 }
1370 1368
1371 spin_lock_init(&adapter->tx_queue_lock);
1372
1373 /* Explicitly disable IRQ since the NIC can be in any state. */ 1369 /* Explicitly disable IRQ since the NIC can be in any state. */
1374 e1000_irq_disable(adapter); 1370 e1000_irq_disable(adapter);
1375 1371
@@ -1624,7 +1620,6 @@ setup_tx_desc_die:
1624 1620
1625 txdr->next_to_use = 0; 1621 txdr->next_to_use = 0;
1626 txdr->next_to_clean = 0; 1622 txdr->next_to_clean = 0;
1627 spin_lock_init(&txdr->tx_lock);
1628 1623
1629 return 0; 1624 return 0;
1630} 1625}
@@ -3185,7 +3180,6 @@ static int e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
3185 unsigned int max_txd_pwr = E1000_MAX_TXD_PWR; 3180 unsigned int max_txd_pwr = E1000_MAX_TXD_PWR;
3186 unsigned int tx_flags = 0; 3181 unsigned int tx_flags = 0;
3187 unsigned int len = skb->len - skb->data_len; 3182 unsigned int len = skb->len - skb->data_len;
3188 unsigned long flags;
3189 unsigned int nr_frags; 3183 unsigned int nr_frags;
3190 unsigned int mss; 3184 unsigned int mss;
3191 int count = 0; 3185 int count = 0;
@@ -3290,22 +3284,15 @@ static int e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
3290 (hw->mac_type == e1000_82573)) 3284 (hw->mac_type == e1000_82573))
3291 e1000_transfer_dhcp_info(adapter, skb); 3285 e1000_transfer_dhcp_info(adapter, skb);
3292 3286
3293 if (!spin_trylock_irqsave(&tx_ring->tx_lock, flags))
3294 /* Collision - tell upper layer to requeue */
3295 return NETDEV_TX_LOCKED;
3296
3297 /* need: count + 2 desc gap to keep tail from touching 3287 /* need: count + 2 desc gap to keep tail from touching
3298 * head, otherwise try next time */ 3288 * head, otherwise try next time */
3299 if (unlikely(e1000_maybe_stop_tx(netdev, tx_ring, count + 2))) { 3289 if (unlikely(e1000_maybe_stop_tx(netdev, tx_ring, count + 2)))
3300 spin_unlock_irqrestore(&tx_ring->tx_lock, flags);
3301 return NETDEV_TX_BUSY; 3290 return NETDEV_TX_BUSY;
3302 }
3303 3291
3304 if (unlikely(hw->mac_type == e1000_82547)) { 3292 if (unlikely(hw->mac_type == e1000_82547)) {
3305 if (unlikely(e1000_82547_fifo_workaround(adapter, skb))) { 3293 if (unlikely(e1000_82547_fifo_workaround(adapter, skb))) {
3306 netif_stop_queue(netdev); 3294 netif_stop_queue(netdev);
3307 mod_timer(&adapter->tx_fifo_stall_timer, jiffies + 1); 3295 mod_timer(&adapter->tx_fifo_stall_timer, jiffies + 1);
3308 spin_unlock_irqrestore(&tx_ring->tx_lock, flags);
3309 return NETDEV_TX_BUSY; 3296 return NETDEV_TX_BUSY;
3310 } 3297 }
3311 } 3298 }
@@ -3320,7 +3307,6 @@ static int e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
3320 tso = e1000_tso(adapter, tx_ring, skb); 3307 tso = e1000_tso(adapter, tx_ring, skb);
3321 if (tso < 0) { 3308 if (tso < 0) {
3322 dev_kfree_skb_any(skb); 3309 dev_kfree_skb_any(skb);
3323 spin_unlock_irqrestore(&tx_ring->tx_lock, flags);
3324 return NETDEV_TX_OK; 3310 return NETDEV_TX_OK;
3325 } 3311 }
3326 3312
@@ -3345,7 +3331,6 @@ static int e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
3345 /* Make sure there is space in the ring for the next send. */ 3331 /* Make sure there is space in the ring for the next send. */
3346 e1000_maybe_stop_tx(netdev, tx_ring, MAX_SKB_FRAGS + 2); 3332 e1000_maybe_stop_tx(netdev, tx_ring, MAX_SKB_FRAGS + 2);
3347 3333
3348 spin_unlock_irqrestore(&tx_ring->tx_lock, flags);
3349 return NETDEV_TX_OK; 3334 return NETDEV_TX_OK;
3350} 3335}
3351 3336
@@ -3687,12 +3672,12 @@ static irqreturn_t e1000_intr_msi(int irq, void *data)
3687 mod_timer(&adapter->watchdog_timer, jiffies + 1); 3672 mod_timer(&adapter->watchdog_timer, jiffies + 1);
3688 } 3673 }
3689 3674
3690 if (likely(netif_rx_schedule_prep(&adapter->napi))) { 3675 if (likely(napi_schedule_prep(&adapter->napi))) {
3691 adapter->total_tx_bytes = 0; 3676 adapter->total_tx_bytes = 0;
3692 adapter->total_tx_packets = 0; 3677 adapter->total_tx_packets = 0;
3693 adapter->total_rx_bytes = 0; 3678 adapter->total_rx_bytes = 0;
3694 adapter->total_rx_packets = 0; 3679 adapter->total_rx_packets = 0;
3695 __netif_rx_schedule(&adapter->napi); 3680 __napi_schedule(&adapter->napi);
3696 } else 3681 } else
3697 e1000_irq_enable(adapter); 3682 e1000_irq_enable(adapter);
3698 3683
@@ -3747,12 +3732,12 @@ static irqreturn_t e1000_intr(int irq, void *data)
3747 ew32(IMC, ~0); 3732 ew32(IMC, ~0);
3748 E1000_WRITE_FLUSH(); 3733 E1000_WRITE_FLUSH();
3749 } 3734 }
3750 if (likely(netif_rx_schedule_prep(&adapter->napi))) { 3735 if (likely(napi_schedule_prep(&adapter->napi))) {
3751 adapter->total_tx_bytes = 0; 3736 adapter->total_tx_bytes = 0;
3752 adapter->total_tx_packets = 0; 3737 adapter->total_tx_packets = 0;
3753 adapter->total_rx_bytes = 0; 3738 adapter->total_rx_bytes = 0;
3754 adapter->total_rx_packets = 0; 3739 adapter->total_rx_packets = 0;
3755 __netif_rx_schedule(&adapter->napi); 3740 __napi_schedule(&adapter->napi);
3756 } else 3741 } else
3757 /* this really should not happen! if it does it is basically a 3742 /* this really should not happen! if it does it is basically a
3758 * bug, but not a hard error, so enable ints and continue */ 3743 * bug, but not a hard error, so enable ints and continue */
@@ -3773,15 +3758,7 @@ static int e1000_clean(struct napi_struct *napi, int budget)
3773 3758
3774 adapter = netdev_priv(poll_dev); 3759 adapter = netdev_priv(poll_dev);
3775 3760
3776 /* e1000_clean is called per-cpu. This lock protects 3761 tx_cleaned = e1000_clean_tx_irq(adapter, &adapter->tx_ring[0]);
3777 * tx_ring[0] from being cleaned by multiple cpus
3778 * simultaneously. A failure obtaining the lock means
3779 * tx_ring[0] is currently being cleaned anyway. */
3780 if (spin_trylock(&adapter->tx_queue_lock)) {
3781 tx_cleaned = e1000_clean_tx_irq(adapter,
3782 &adapter->tx_ring[0]);
3783 spin_unlock(&adapter->tx_queue_lock);
3784 }
3785 3762
3786 adapter->clean_rx(adapter, &adapter->rx_ring[0], 3763 adapter->clean_rx(adapter, &adapter->rx_ring[0],
3787 &work_done, budget); 3764 &work_done, budget);
@@ -3793,7 +3770,7 @@ static int e1000_clean(struct napi_struct *napi, int budget)
3793 if (work_done < budget) { 3770 if (work_done < budget) {
3794 if (likely(adapter->itr_setting & 3)) 3771 if (likely(adapter->itr_setting & 3))
3795 e1000_set_itr(adapter); 3772 e1000_set_itr(adapter);
3796 netif_rx_complete(napi); 3773 napi_complete(napi);
3797 e1000_irq_enable(adapter); 3774 e1000_irq_enable(adapter);
3798 } 3775 }
3799 3776
diff --git a/drivers/net/e1000e/e1000.h b/drivers/net/e1000e/e1000.h
index 37bcb190eef8..28bf9a51346f 100644
--- a/drivers/net/e1000e/e1000.h
+++ b/drivers/net/e1000e/e1000.h
@@ -195,8 +195,6 @@ struct e1000_adapter {
195 u16 link_duplex; 195 u16 link_duplex;
196 u16 eeprom_vers; 196 u16 eeprom_vers;
197 197
198 spinlock_t tx_queue_lock; /* prevent concurrent tail updates */
199
200 /* track device up/down/testing state */ 198 /* track device up/down/testing state */
201 unsigned long state; 199 unsigned long state;
202 200
diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c
index 91817d0afcaf..e04b392c9a59 100644
--- a/drivers/net/e1000e/netdev.c
+++ b/drivers/net/e1000e/netdev.c
@@ -47,7 +47,7 @@
47 47
48#include "e1000.h" 48#include "e1000.h"
49 49
50#define DRV_VERSION "0.3.3.3-k6" 50#define DRV_VERSION "0.3.3.4-k2"
51char e1000e_driver_name[] = "e1000e"; 51char e1000e_driver_name[] = "e1000e";
52const char e1000e_driver_version[] = DRV_VERSION; 52const char e1000e_driver_version[] = DRV_VERSION;
53 53
@@ -99,8 +99,8 @@ static void e1000_receive_skb(struct e1000_adapter *adapter,
99 skb->protocol = eth_type_trans(skb, netdev); 99 skb->protocol = eth_type_trans(skb, netdev);
100 100
101 if (adapter->vlgrp && (status & E1000_RXD_STAT_VP)) 101 if (adapter->vlgrp && (status & E1000_RXD_STAT_VP))
102 vlan_hwaccel_receive_skb(skb, adapter->vlgrp, 102 vlan_gro_receive(&adapter->napi, adapter->vlgrp,
103 le16_to_cpu(vlan)); 103 le16_to_cpu(vlan), skb);
104 else 104 else
105 napi_gro_receive(&adapter->napi, skb); 105 napi_gro_receive(&adapter->napi, skb);
106} 106}
@@ -1179,12 +1179,12 @@ static irqreturn_t e1000_intr_msi(int irq, void *data)
1179 mod_timer(&adapter->watchdog_timer, jiffies + 1); 1179 mod_timer(&adapter->watchdog_timer, jiffies + 1);
1180 } 1180 }
1181 1181
1182 if (netif_rx_schedule_prep(&adapter->napi)) { 1182 if (napi_schedule_prep(&adapter->napi)) {
1183 adapter->total_tx_bytes = 0; 1183 adapter->total_tx_bytes = 0;
1184 adapter->total_tx_packets = 0; 1184 adapter->total_tx_packets = 0;
1185 adapter->total_rx_bytes = 0; 1185 adapter->total_rx_bytes = 0;
1186 adapter->total_rx_packets = 0; 1186 adapter->total_rx_packets = 0;
1187 __netif_rx_schedule(&adapter->napi); 1187 __napi_schedule(&adapter->napi);
1188 } 1188 }
1189 1189
1190 return IRQ_HANDLED; 1190 return IRQ_HANDLED;
@@ -1246,12 +1246,12 @@ static irqreturn_t e1000_intr(int irq, void *data)
1246 mod_timer(&adapter->watchdog_timer, jiffies + 1); 1246 mod_timer(&adapter->watchdog_timer, jiffies + 1);
1247 } 1247 }
1248 1248
1249 if (netif_rx_schedule_prep(&adapter->napi)) { 1249 if (napi_schedule_prep(&adapter->napi)) {
1250 adapter->total_tx_bytes = 0; 1250 adapter->total_tx_bytes = 0;
1251 adapter->total_tx_packets = 0; 1251 adapter->total_tx_packets = 0;
1252 adapter->total_rx_bytes = 0; 1252 adapter->total_rx_bytes = 0;
1253 adapter->total_rx_packets = 0; 1253 adapter->total_rx_packets = 0;
1254 __netif_rx_schedule(&adapter->napi); 1254 __napi_schedule(&adapter->napi);
1255 } 1255 }
1256 1256
1257 return IRQ_HANDLED; 1257 return IRQ_HANDLED;
@@ -1320,10 +1320,10 @@ static irqreturn_t e1000_intr_msix_rx(int irq, void *data)
1320 adapter->rx_ring->set_itr = 0; 1320 adapter->rx_ring->set_itr = 0;
1321 } 1321 }
1322 1322
1323 if (netif_rx_schedule_prep(&adapter->napi)) { 1323 if (napi_schedule_prep(&adapter->napi)) {
1324 adapter->total_rx_bytes = 0; 1324 adapter->total_rx_bytes = 0;
1325 adapter->total_rx_packets = 0; 1325 adapter->total_rx_packets = 0;
1326 __netif_rx_schedule(&adapter->napi); 1326 __napi_schedule(&adapter->napi);
1327 } 1327 }
1328 return IRQ_HANDLED; 1328 return IRQ_HANDLED;
1329} 1329}
@@ -1698,7 +1698,6 @@ int e1000e_setup_tx_resources(struct e1000_adapter *adapter)
1698 1698
1699 tx_ring->next_to_use = 0; 1699 tx_ring->next_to_use = 0;
1700 tx_ring->next_to_clean = 0; 1700 tx_ring->next_to_clean = 0;
1701 spin_lock_init(&adapter->tx_queue_lock);
1702 1701
1703 return 0; 1702 return 0;
1704err: 1703err:
@@ -2007,16 +2006,7 @@ static int e1000_clean(struct napi_struct *napi, int budget)
2007 !(adapter->rx_ring->ims_val & adapter->tx_ring->ims_val)) 2006 !(adapter->rx_ring->ims_val & adapter->tx_ring->ims_val))
2008 goto clean_rx; 2007 goto clean_rx;
2009 2008
2010 /* 2009 tx_cleaned = e1000_clean_tx_irq(adapter);
2011 * e1000_clean is called per-cpu. This lock protects
2012 * tx_ring from being cleaned by multiple cpus
2013 * simultaneously. A failure obtaining the lock means
2014 * tx_ring is currently being cleaned anyway.
2015 */
2016 if (spin_trylock(&adapter->tx_queue_lock)) {
2017 tx_cleaned = e1000_clean_tx_irq(adapter);
2018 spin_unlock(&adapter->tx_queue_lock);
2019 }
2020 2010
2021clean_rx: 2011clean_rx:
2022 adapter->clean_rx(adapter, &work_done, budget); 2012 adapter->clean_rx(adapter, &work_done, budget);
@@ -2028,7 +2018,7 @@ clean_rx:
2028 if (work_done < budget) { 2018 if (work_done < budget) {
2029 if (adapter->itr_setting & 3) 2019 if (adapter->itr_setting & 3)
2030 e1000_set_itr(adapter); 2020 e1000_set_itr(adapter);
2031 netif_rx_complete(napi); 2021 napi_complete(napi);
2032 if (adapter->msix_entries) 2022 if (adapter->msix_entries)
2033 ew32(IMS, adapter->rx_ring->ims_val); 2023 ew32(IMS, adapter->rx_ring->ims_val);
2034 else 2024 else
@@ -2922,8 +2912,6 @@ static int __devinit e1000_sw_init(struct e1000_adapter *adapter)
2922 if (e1000_alloc_queues(adapter)) 2912 if (e1000_alloc_queues(adapter))
2923 return -ENOMEM; 2913 return -ENOMEM;
2924 2914
2925 spin_lock_init(&adapter->tx_queue_lock);
2926
2927 /* Explicitly disable IRQ since the NIC can be in any state. */ 2915 /* Explicitly disable IRQ since the NIC can be in any state. */
2928 e1000_irq_disable(adapter); 2916 e1000_irq_disable(adapter);
2929 2917
@@ -4069,7 +4057,6 @@ static int e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
4069 unsigned int max_txd_pwr = E1000_MAX_TXD_PWR; 4057 unsigned int max_txd_pwr = E1000_MAX_TXD_PWR;
4070 unsigned int tx_flags = 0; 4058 unsigned int tx_flags = 0;
4071 unsigned int len = skb->len - skb->data_len; 4059 unsigned int len = skb->len - skb->data_len;
4072 unsigned long irq_flags;
4073 unsigned int nr_frags; 4060 unsigned int nr_frags;
4074 unsigned int mss; 4061 unsigned int mss;
4075 int count = 0; 4062 int count = 0;
@@ -4138,18 +4125,12 @@ static int e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
4138 if (adapter->hw.mac.tx_pkt_filtering) 4125 if (adapter->hw.mac.tx_pkt_filtering)
4139 e1000_transfer_dhcp_info(adapter, skb); 4126 e1000_transfer_dhcp_info(adapter, skb);
4140 4127
4141 if (!spin_trylock_irqsave(&adapter->tx_queue_lock, irq_flags))
4142 /* Collision - tell upper layer to requeue */
4143 return NETDEV_TX_LOCKED;
4144
4145 /* 4128 /*
4146 * need: count + 2 desc gap to keep tail from touching 4129 * need: count + 2 desc gap to keep tail from touching
4147 * head, otherwise try next time 4130 * head, otherwise try next time
4148 */ 4131 */
4149 if (e1000_maybe_stop_tx(netdev, count + 2)) { 4132 if (e1000_maybe_stop_tx(netdev, count + 2))
4150 spin_unlock_irqrestore(&adapter->tx_queue_lock, irq_flags);
4151 return NETDEV_TX_BUSY; 4133 return NETDEV_TX_BUSY;
4152 }
4153 4134
4154 if (adapter->vlgrp && vlan_tx_tag_present(skb)) { 4135 if (adapter->vlgrp && vlan_tx_tag_present(skb)) {
4155 tx_flags |= E1000_TX_FLAGS_VLAN; 4136 tx_flags |= E1000_TX_FLAGS_VLAN;
@@ -4161,7 +4142,6 @@ static int e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
4161 tso = e1000_tso(adapter, skb); 4142 tso = e1000_tso(adapter, skb);
4162 if (tso < 0) { 4143 if (tso < 0) {
4163 dev_kfree_skb_any(skb); 4144 dev_kfree_skb_any(skb);
4164 spin_unlock_irqrestore(&adapter->tx_queue_lock, irq_flags);
4165 return NETDEV_TX_OK; 4145 return NETDEV_TX_OK;
4166 } 4146 }
4167 4147
@@ -4182,7 +4162,6 @@ static int e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
4182 if (count < 0) { 4162 if (count < 0) {
4183 /* handle pci_map_single() error in e1000_tx_map */ 4163 /* handle pci_map_single() error in e1000_tx_map */
4184 dev_kfree_skb_any(skb); 4164 dev_kfree_skb_any(skb);
4185 spin_unlock_irqrestore(&adapter->tx_queue_lock, irq_flags);
4186 return NETDEV_TX_OK; 4165 return NETDEV_TX_OK;
4187 } 4166 }
4188 4167
@@ -4193,7 +4172,6 @@ static int e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
4193 /* Make sure there is space in the ring for the next send. */ 4172 /* Make sure there is space in the ring for the next send. */
4194 e1000_maybe_stop_tx(netdev, MAX_SKB_FRAGS + 2); 4173 e1000_maybe_stop_tx(netdev, MAX_SKB_FRAGS + 2);
4195 4174
4196 spin_unlock_irqrestore(&adapter->tx_queue_lock, irq_flags);
4197 return NETDEV_TX_OK; 4175 return NETDEV_TX_OK;
4198} 4176}
4199 4177
@@ -4922,12 +4900,6 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
4922 if (pci_using_dac) 4900 if (pci_using_dac)
4923 netdev->features |= NETIF_F_HIGHDMA; 4901 netdev->features |= NETIF_F_HIGHDMA;
4924 4902
4925 /*
4926 * We should not be using LLTX anymore, but we are still Tx faster with
4927 * it.
4928 */
4929 netdev->features |= NETIF_F_LLTX;
4930
4931 if (e1000e_enable_mng_pass_thru(&adapter->hw)) 4903 if (e1000e_enable_mng_pass_thru(&adapter->hw))
4932 adapter->flags |= FLAG_MNG_PT_ENABLED; 4904 adapter->flags |= FLAG_MNG_PT_ENABLED;
4933 4905
diff --git a/drivers/net/ehea/ehea.h b/drivers/net/ehea/ehea.h
index 6271b9411ccf..f7e2ccfd3e8c 100644
--- a/drivers/net/ehea/ehea.h
+++ b/drivers/net/ehea/ehea.h
@@ -40,7 +40,7 @@
40#include <asm/io.h> 40#include <asm/io.h>
41 41
42#define DRV_NAME "ehea" 42#define DRV_NAME "ehea"
43#define DRV_VERSION "EHEA_0096" 43#define DRV_VERSION "EHEA_0097"
44 44
45/* eHEA capability flags */ 45/* eHEA capability flags */
46#define DLPAR_PORT_ADD_REM 1 46#define DLPAR_PORT_ADD_REM 1
diff --git a/drivers/net/ehea/ehea_main.c b/drivers/net/ehea/ehea_main.c
index dfe92264e825..19fccca74ce0 100644
--- a/drivers/net/ehea/ehea_main.c
+++ b/drivers/net/ehea/ehea_main.c
@@ -308,7 +308,7 @@ static struct net_device_stats *ehea_get_stats(struct net_device *dev)
308 308
309 memset(stats, 0, sizeof(*stats)); 309 memset(stats, 0, sizeof(*stats));
310 310
311 cb2 = kzalloc(PAGE_SIZE, GFP_ATOMIC); 311 cb2 = (void *)get_zeroed_page(GFP_ATOMIC);
312 if (!cb2) { 312 if (!cb2) {
313 ehea_error("no mem for cb2"); 313 ehea_error("no mem for cb2");
314 goto out; 314 goto out;
@@ -341,7 +341,7 @@ static struct net_device_stats *ehea_get_stats(struct net_device *dev)
341 stats->rx_packets = rx_packets; 341 stats->rx_packets = rx_packets;
342 342
343out_herr: 343out_herr:
344 kfree(cb2); 344 free_page((unsigned long)cb2);
345out: 345out:
346 return stats; 346 return stats;
347} 347}
@@ -370,8 +370,6 @@ static void ehea_refill_rq1(struct ehea_port_res *pr, int index, int nr_of_wqes)
370 EHEA_L_PKT_SIZE); 370 EHEA_L_PKT_SIZE);
371 if (!skb_arr_rq1[index]) { 371 if (!skb_arr_rq1[index]) {
372 pr->rq1_skba.os_skbs = fill_wqes - i; 372 pr->rq1_skba.os_skbs = fill_wqes - i;
373 ehea_error("%s: no mem for skb/%d wqes filled",
374 dev->name, i);
375 break; 373 break;
376 } 374 }
377 } 375 }
@@ -387,26 +385,19 @@ static void ehea_refill_rq1(struct ehea_port_res *pr, int index, int nr_of_wqes)
387 ehea_update_rq1a(pr->qp, adder); 385 ehea_update_rq1a(pr->qp, adder);
388} 386}
389 387
390static int ehea_init_fill_rq1(struct ehea_port_res *pr, int nr_rq1a) 388static void ehea_init_fill_rq1(struct ehea_port_res *pr, int nr_rq1a)
391{ 389{
392 int ret = 0;
393 struct sk_buff **skb_arr_rq1 = pr->rq1_skba.arr; 390 struct sk_buff **skb_arr_rq1 = pr->rq1_skba.arr;
394 struct net_device *dev = pr->port->netdev; 391 struct net_device *dev = pr->port->netdev;
395 int i; 392 int i;
396 393
397 for (i = 0; i < pr->rq1_skba.len; i++) { 394 for (i = 0; i < pr->rq1_skba.len; i++) {
398 skb_arr_rq1[i] = netdev_alloc_skb(dev, EHEA_L_PKT_SIZE); 395 skb_arr_rq1[i] = netdev_alloc_skb(dev, EHEA_L_PKT_SIZE);
399 if (!skb_arr_rq1[i]) { 396 if (!skb_arr_rq1[i])
400 ehea_error("%s: no mem for skb/%d wqes filled", 397 break;
401 dev->name, i);
402 ret = -ENOMEM;
403 goto out;
404 }
405 } 398 }
406 /* Ring doorbell */ 399 /* Ring doorbell */
407 ehea_update_rq1a(pr->qp, nr_rq1a); 400 ehea_update_rq1a(pr->qp, nr_rq1a);
408out:
409 return ret;
410} 401}
411 402
412static int ehea_refill_rq_def(struct ehea_port_res *pr, 403static int ehea_refill_rq_def(struct ehea_port_res *pr,
@@ -435,10 +426,12 @@ static int ehea_refill_rq_def(struct ehea_port_res *pr,
435 u64 tmp_addr; 426 u64 tmp_addr;
436 struct sk_buff *skb = netdev_alloc_skb(dev, packet_size); 427 struct sk_buff *skb = netdev_alloc_skb(dev, packet_size);
437 if (!skb) { 428 if (!skb) {
438 ehea_error("%s: no mem for skb/%d wqes filled",
439 pr->port->netdev->name, i);
440 q_skba->os_skbs = fill_wqes - i; 429 q_skba->os_skbs = fill_wqes - i;
441 ret = -ENOMEM; 430 if (q_skba->os_skbs == q_skba->len - 2) {
431 ehea_info("%s: rq%i ran dry - no mem for skb",
432 pr->port->netdev->name, rq_nr);
433 ret = -ENOMEM;
434 }
442 break; 435 break;
443 } 436 }
444 skb_reserve(skb, NET_IP_ALIGN); 437 skb_reserve(skb, NET_IP_ALIGN);
@@ -830,7 +823,7 @@ static int ehea_poll(struct napi_struct *napi, int budget)
830 while ((rx != budget) || force_irq) { 823 while ((rx != budget) || force_irq) {
831 pr->poll_counter = 0; 824 pr->poll_counter = 0;
832 force_irq = 0; 825 force_irq = 0;
833 netif_rx_complete(napi); 826 napi_complete(napi);
834 ehea_reset_cq_ep(pr->recv_cq); 827 ehea_reset_cq_ep(pr->recv_cq);
835 ehea_reset_cq_ep(pr->send_cq); 828 ehea_reset_cq_ep(pr->send_cq);
836 ehea_reset_cq_n1(pr->recv_cq); 829 ehea_reset_cq_n1(pr->recv_cq);
@@ -841,7 +834,7 @@ static int ehea_poll(struct napi_struct *napi, int budget)
841 if (!cqe && !cqe_skb) 834 if (!cqe && !cqe_skb)
842 return rx; 835 return rx;
843 836
844 if (!netif_rx_reschedule(napi)) 837 if (!napi_reschedule(napi))
845 return rx; 838 return rx;
846 839
847 cqe_skb = ehea_proc_cqes(pr, EHEA_POLL_MAX_CQES); 840 cqe_skb = ehea_proc_cqes(pr, EHEA_POLL_MAX_CQES);
@@ -859,7 +852,7 @@ static void ehea_netpoll(struct net_device *dev)
859 int i; 852 int i;
860 853
861 for (i = 0; i < port->num_def_qps; i++) 854 for (i = 0; i < port->num_def_qps; i++)
862 netif_rx_schedule(&port->port_res[i].napi); 855 napi_schedule(&port->port_res[i].napi);
863} 856}
864#endif 857#endif
865 858
@@ -867,7 +860,7 @@ static irqreturn_t ehea_recv_irq_handler(int irq, void *param)
867{ 860{
868 struct ehea_port_res *pr = param; 861 struct ehea_port_res *pr = param;
869 862
870 netif_rx_schedule(&pr->napi); 863 napi_schedule(&pr->napi);
871 864
872 return IRQ_HANDLED; 865 return IRQ_HANDLED;
873} 866}
@@ -915,7 +908,7 @@ int ehea_sense_port_attr(struct ehea_port *port)
915 struct hcp_ehea_port_cb0 *cb0; 908 struct hcp_ehea_port_cb0 *cb0;
916 909
917 /* may be called via ehea_neq_tasklet() */ 910 /* may be called via ehea_neq_tasklet() */
918 cb0 = kzalloc(PAGE_SIZE, GFP_ATOMIC); 911 cb0 = (void *)get_zeroed_page(GFP_ATOMIC);
919 if (!cb0) { 912 if (!cb0) {
920 ehea_error("no mem for cb0"); 913 ehea_error("no mem for cb0");
921 ret = -ENOMEM; 914 ret = -ENOMEM;
@@ -996,7 +989,7 @@ int ehea_sense_port_attr(struct ehea_port *port)
996out_free: 989out_free:
997 if (ret || netif_msg_probe(port)) 990 if (ret || netif_msg_probe(port))
998 ehea_dump(cb0, sizeof(*cb0), "ehea_sense_port_attr"); 991 ehea_dump(cb0, sizeof(*cb0), "ehea_sense_port_attr");
999 kfree(cb0); 992 free_page((unsigned long)cb0);
1000out: 993out:
1001 return ret; 994 return ret;
1002} 995}
@@ -1007,7 +1000,7 @@ int ehea_set_portspeed(struct ehea_port *port, u32 port_speed)
1007 u64 hret; 1000 u64 hret;
1008 int ret = 0; 1001 int ret = 0;
1009 1002
1010 cb4 = kzalloc(PAGE_SIZE, GFP_KERNEL); 1003 cb4 = (void *)get_zeroed_page(GFP_KERNEL);
1011 if (!cb4) { 1004 if (!cb4) {
1012 ehea_error("no mem for cb4"); 1005 ehea_error("no mem for cb4");
1013 ret = -ENOMEM; 1006 ret = -ENOMEM;
@@ -1075,7 +1068,7 @@ int ehea_set_portspeed(struct ehea_port *port, u32 port_speed)
1075 if (!prop_carrier_state || (port->phy_link == EHEA_PHY_LINK_UP)) 1068 if (!prop_carrier_state || (port->phy_link == EHEA_PHY_LINK_UP))
1076 netif_carrier_on(port->netdev); 1069 netif_carrier_on(port->netdev);
1077 1070
1078 kfree(cb4); 1071 free_page((unsigned long)cb4);
1079out: 1072out:
1080 return ret; 1073 return ret;
1081} 1074}
@@ -1201,11 +1194,11 @@ static int ehea_fill_port_res(struct ehea_port_res *pr)
1201 int ret; 1194 int ret;
1202 struct ehea_qp_init_attr *init_attr = &pr->qp->init_attr; 1195 struct ehea_qp_init_attr *init_attr = &pr->qp->init_attr;
1203 1196
1204 ret = ehea_init_fill_rq1(pr, init_attr->act_nr_rwqes_rq1 1197 ehea_init_fill_rq1(pr, init_attr->act_nr_rwqes_rq1
1205 - init_attr->act_nr_rwqes_rq2 1198 - init_attr->act_nr_rwqes_rq2
1206 - init_attr->act_nr_rwqes_rq3 - 1); 1199 - init_attr->act_nr_rwqes_rq3 - 1);
1207 1200
1208 ret |= ehea_refill_rq2(pr, init_attr->act_nr_rwqes_rq2 - 1); 1201 ret = ehea_refill_rq2(pr, init_attr->act_nr_rwqes_rq2 - 1);
1209 1202
1210 ret |= ehea_refill_rq3(pr, init_attr->act_nr_rwqes_rq3 - 1); 1203 ret |= ehea_refill_rq3(pr, init_attr->act_nr_rwqes_rq3 - 1);
1211 1204
@@ -1302,7 +1295,7 @@ static int ehea_configure_port(struct ehea_port *port)
1302 struct hcp_ehea_port_cb0 *cb0; 1295 struct hcp_ehea_port_cb0 *cb0;
1303 1296
1304 ret = -ENOMEM; 1297 ret = -ENOMEM;
1305 cb0 = kzalloc(PAGE_SIZE, GFP_KERNEL); 1298 cb0 = (void *)get_zeroed_page(GFP_KERNEL);
1306 if (!cb0) 1299 if (!cb0)
1307 goto out; 1300 goto out;
1308 1301
@@ -1338,7 +1331,7 @@ static int ehea_configure_port(struct ehea_port *port)
1338 ret = 0; 1331 ret = 0;
1339 1332
1340out_free: 1333out_free:
1341 kfree(cb0); 1334 free_page((unsigned long)cb0);
1342out: 1335out:
1343 return ret; 1336 return ret;
1344} 1337}
@@ -1748,7 +1741,7 @@ static int ehea_set_mac_addr(struct net_device *dev, void *sa)
1748 goto out; 1741 goto out;
1749 } 1742 }
1750 1743
1751 cb0 = kzalloc(PAGE_SIZE, GFP_KERNEL); 1744 cb0 = (void *)get_zeroed_page(GFP_KERNEL);
1752 if (!cb0) { 1745 if (!cb0) {
1753 ehea_error("no mem for cb0"); 1746 ehea_error("no mem for cb0");
1754 ret = -ENOMEM; 1747 ret = -ENOMEM;
@@ -1793,7 +1786,7 @@ out_upregs:
1793 ehea_update_bcmc_registrations(); 1786 ehea_update_bcmc_registrations();
1794 spin_unlock(&ehea_bcmc_regs.lock); 1787 spin_unlock(&ehea_bcmc_regs.lock);
1795out_free: 1788out_free:
1796 kfree(cb0); 1789 free_page((unsigned long)cb0);
1797out: 1790out:
1798 return ret; 1791 return ret;
1799} 1792}
@@ -1817,7 +1810,7 @@ static void ehea_promiscuous(struct net_device *dev, int enable)
1817 if ((enable && port->promisc) || (!enable && !port->promisc)) 1810 if ((enable && port->promisc) || (!enable && !port->promisc))
1818 return; 1811 return;
1819 1812
1820 cb7 = kzalloc(PAGE_SIZE, GFP_ATOMIC); 1813 cb7 = (void *)get_zeroed_page(GFP_ATOMIC);
1821 if (!cb7) { 1814 if (!cb7) {
1822 ehea_error("no mem for cb7"); 1815 ehea_error("no mem for cb7");
1823 goto out; 1816 goto out;
@@ -1836,7 +1829,7 @@ static void ehea_promiscuous(struct net_device *dev, int enable)
1836 1829
1837 port->promisc = enable; 1830 port->promisc = enable;
1838out: 1831out:
1839 kfree(cb7); 1832 free_page((unsigned long)cb7);
1840 return; 1833 return;
1841} 1834}
1842 1835
@@ -2217,7 +2210,7 @@ static void ehea_vlan_rx_register(struct net_device *dev,
2217 2210
2218 port->vgrp = grp; 2211 port->vgrp = grp;
2219 2212
2220 cb1 = kzalloc(PAGE_SIZE, GFP_KERNEL); 2213 cb1 = (void *)get_zeroed_page(GFP_KERNEL);
2221 if (!cb1) { 2214 if (!cb1) {
2222 ehea_error("no mem for cb1"); 2215 ehea_error("no mem for cb1");
2223 goto out; 2216 goto out;
@@ -2228,7 +2221,7 @@ static void ehea_vlan_rx_register(struct net_device *dev,
2228 if (hret != H_SUCCESS) 2221 if (hret != H_SUCCESS)
2229 ehea_error("modify_ehea_port failed"); 2222 ehea_error("modify_ehea_port failed");
2230 2223
2231 kfree(cb1); 2224 free_page((unsigned long)cb1);
2232out: 2225out:
2233 return; 2226 return;
2234} 2227}
@@ -2241,7 +2234,7 @@ static void ehea_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
2241 int index; 2234 int index;
2242 u64 hret; 2235 u64 hret;
2243 2236
2244 cb1 = kzalloc(PAGE_SIZE, GFP_KERNEL); 2237 cb1 = (void *)get_zeroed_page(GFP_KERNEL);
2245 if (!cb1) { 2238 if (!cb1) {
2246 ehea_error("no mem for cb1"); 2239 ehea_error("no mem for cb1");
2247 goto out; 2240 goto out;
@@ -2262,7 +2255,7 @@ static void ehea_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
2262 if (hret != H_SUCCESS) 2255 if (hret != H_SUCCESS)
2263 ehea_error("modify_ehea_port failed"); 2256 ehea_error("modify_ehea_port failed");
2264out: 2257out:
2265 kfree(cb1); 2258 free_page((unsigned long)cb1);
2266 return; 2259 return;
2267} 2260}
2268 2261
@@ -2276,7 +2269,7 @@ static void ehea_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
2276 2269
2277 vlan_group_set_device(port->vgrp, vid, NULL); 2270 vlan_group_set_device(port->vgrp, vid, NULL);
2278 2271
2279 cb1 = kzalloc(PAGE_SIZE, GFP_KERNEL); 2272 cb1 = (void *)get_zeroed_page(GFP_KERNEL);
2280 if (!cb1) { 2273 if (!cb1) {
2281 ehea_error("no mem for cb1"); 2274 ehea_error("no mem for cb1");
2282 goto out; 2275 goto out;
@@ -2297,7 +2290,7 @@ static void ehea_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
2297 if (hret != H_SUCCESS) 2290 if (hret != H_SUCCESS)
2298 ehea_error("modify_ehea_port failed"); 2291 ehea_error("modify_ehea_port failed");
2299out: 2292out:
2300 kfree(cb1); 2293 free_page((unsigned long)cb1);
2301 return; 2294 return;
2302} 2295}
2303 2296
@@ -2309,7 +2302,7 @@ int ehea_activate_qp(struct ehea_adapter *adapter, struct ehea_qp *qp)
2309 u64 dummy64 = 0; 2302 u64 dummy64 = 0;
2310 struct hcp_modify_qp_cb0 *cb0; 2303 struct hcp_modify_qp_cb0 *cb0;
2311 2304
2312 cb0 = kzalloc(PAGE_SIZE, GFP_KERNEL); 2305 cb0 = (void *)get_zeroed_page(GFP_KERNEL);
2313 if (!cb0) { 2306 if (!cb0) {
2314 ret = -ENOMEM; 2307 ret = -ENOMEM;
2315 goto out; 2308 goto out;
@@ -2372,7 +2365,7 @@ int ehea_activate_qp(struct ehea_adapter *adapter, struct ehea_qp *qp)
2372 2365
2373 ret = 0; 2366 ret = 0;
2374out: 2367out:
2375 kfree(cb0); 2368 free_page((unsigned long)cb0);
2376 return ret; 2369 return ret;
2377} 2370}
2378 2371
@@ -2664,7 +2657,7 @@ int ehea_stop_qps(struct net_device *dev)
2664 u64 dummy64 = 0; 2657 u64 dummy64 = 0;
2665 u16 dummy16 = 0; 2658 u16 dummy16 = 0;
2666 2659
2667 cb0 = kzalloc(PAGE_SIZE, GFP_KERNEL); 2660 cb0 = (void *)get_zeroed_page(GFP_KERNEL);
2668 if (!cb0) { 2661 if (!cb0) {
2669 ret = -ENOMEM; 2662 ret = -ENOMEM;
2670 goto out; 2663 goto out;
@@ -2716,7 +2709,7 @@ int ehea_stop_qps(struct net_device *dev)
2716 2709
2717 ret = 0; 2710 ret = 0;
2718out: 2711out:
2719 kfree(cb0); 2712 free_page((unsigned long)cb0);
2720 2713
2721 return ret; 2714 return ret;
2722} 2715}
@@ -2766,7 +2759,7 @@ int ehea_restart_qps(struct net_device *dev)
2766 u64 dummy64 = 0; 2759 u64 dummy64 = 0;
2767 u16 dummy16 = 0; 2760 u16 dummy16 = 0;
2768 2761
2769 cb0 = kzalloc(PAGE_SIZE, GFP_KERNEL); 2762 cb0 = (void *)get_zeroed_page(GFP_KERNEL);
2770 if (!cb0) { 2763 if (!cb0) {
2771 ret = -ENOMEM; 2764 ret = -ENOMEM;
2772 goto out; 2765 goto out;
@@ -2819,7 +2812,7 @@ int ehea_restart_qps(struct net_device *dev)
2819 ehea_refill_rq3(pr, 0); 2812 ehea_refill_rq3(pr, 0);
2820 } 2813 }
2821out: 2814out:
2822 kfree(cb0); 2815 free_page((unsigned long)cb0);
2823 2816
2824 return ret; 2817 return ret;
2825} 2818}
@@ -2950,7 +2943,7 @@ int ehea_sense_adapter_attr(struct ehea_adapter *adapter)
2950 u64 hret; 2943 u64 hret;
2951 int ret; 2944 int ret;
2952 2945
2953 cb = kzalloc(PAGE_SIZE, GFP_KERNEL); 2946 cb = (void *)get_zeroed_page(GFP_KERNEL);
2954 if (!cb) { 2947 if (!cb) {
2955 ret = -ENOMEM; 2948 ret = -ENOMEM;
2956 goto out; 2949 goto out;
@@ -2967,7 +2960,7 @@ int ehea_sense_adapter_attr(struct ehea_adapter *adapter)
2967 ret = 0; 2960 ret = 0;
2968 2961
2969out_herr: 2962out_herr:
2970 kfree(cb); 2963 free_page((unsigned long)cb);
2971out: 2964out:
2972 return ret; 2965 return ret;
2973} 2966}
@@ -2981,7 +2974,7 @@ int ehea_get_jumboframe_status(struct ehea_port *port, int *jumbo)
2981 *jumbo = 0; 2974 *jumbo = 0;
2982 2975
2983 /* (Try to) enable *jumbo frames */ 2976 /* (Try to) enable *jumbo frames */
2984 cb4 = kzalloc(PAGE_SIZE, GFP_KERNEL); 2977 cb4 = (void *)get_zeroed_page(GFP_KERNEL);
2985 if (!cb4) { 2978 if (!cb4) {
2986 ehea_error("no mem for cb4"); 2979 ehea_error("no mem for cb4");
2987 ret = -ENOMEM; 2980 ret = -ENOMEM;
@@ -3009,7 +3002,7 @@ int ehea_get_jumboframe_status(struct ehea_port *port, int *jumbo)
3009 } else 3002 } else
3010 ret = -EINVAL; 3003 ret = -EINVAL;
3011 3004
3012 kfree(cb4); 3005 free_page((unsigned long)cb4);
3013 } 3006 }
3014out: 3007out:
3015 return ret; 3008 return ret;
@@ -3069,6 +3062,22 @@ static void ehea_unregister_port(struct ehea_port *port)
3069 of_device_unregister(&port->ofdev); 3062 of_device_unregister(&port->ofdev);
3070} 3063}
3071 3064
3065static const struct net_device_ops ehea_netdev_ops = {
3066 .ndo_open = ehea_open,
3067 .ndo_stop = ehea_stop,
3068 .ndo_start_xmit = ehea_start_xmit,
3069#ifdef CONFIG_NET_POLL_CONTROLLER
3070 .ndo_poll_controller = ehea_netpoll,
3071#endif
3072 .ndo_get_stats = ehea_get_stats,
3073 .ndo_set_mac_address = ehea_set_mac_addr,
3074 .ndo_set_multicast_list = ehea_set_multicast_list,
3075 .ndo_change_mtu = ehea_change_mtu,
3076 .ndo_vlan_rx_register = ehea_vlan_rx_register,
3077 .ndo_vlan_rx_add_vid = ehea_vlan_rx_add_vid,
3078 .ndo_vlan_rx_kill_vid = ehea_vlan_rx_kill_vid
3079};
3080
3072struct ehea_port *ehea_setup_single_port(struct ehea_adapter *adapter, 3081struct ehea_port *ehea_setup_single_port(struct ehea_adapter *adapter,
3073 u32 logical_port_id, 3082 u32 logical_port_id,
3074 struct device_node *dn) 3083 struct device_node *dn)
@@ -3121,19 +3130,9 @@ struct ehea_port *ehea_setup_single_port(struct ehea_adapter *adapter,
3121 /* initialize net_device structure */ 3130 /* initialize net_device structure */
3122 memcpy(dev->dev_addr, &port->mac_addr, ETH_ALEN); 3131 memcpy(dev->dev_addr, &port->mac_addr, ETH_ALEN);
3123 3132
3124 dev->open = ehea_open; 3133 dev->netdev_ops = &ehea_netdev_ops;
3125#ifdef CONFIG_NET_POLL_CONTROLLER 3134 ehea_set_ethtool_ops(dev);
3126 dev->poll_controller = ehea_netpoll; 3135
3127#endif
3128 dev->stop = ehea_stop;
3129 dev->hard_start_xmit = ehea_start_xmit;
3130 dev->get_stats = ehea_get_stats;
3131 dev->set_multicast_list = ehea_set_multicast_list;
3132 dev->set_mac_address = ehea_set_mac_addr;
3133 dev->change_mtu = ehea_change_mtu;
3134 dev->vlan_rx_register = ehea_vlan_rx_register;
3135 dev->vlan_rx_add_vid = ehea_vlan_rx_add_vid;
3136 dev->vlan_rx_kill_vid = ehea_vlan_rx_kill_vid;
3137 dev->features = NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_TSO 3136 dev->features = NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_TSO
3138 | NETIF_F_HIGHDMA | NETIF_F_IP_CSUM | NETIF_F_HW_VLAN_TX 3137 | NETIF_F_HIGHDMA | NETIF_F_IP_CSUM | NETIF_F_HW_VLAN_TX
3139 | NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER 3138 | NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER
@@ -3142,7 +3141,6 @@ struct ehea_port *ehea_setup_single_port(struct ehea_adapter *adapter,
3142 dev->watchdog_timeo = EHEA_WATCH_DOG_TIMEOUT; 3141 dev->watchdog_timeo = EHEA_WATCH_DOG_TIMEOUT;
3143 3142
3144 INIT_WORK(&port->reset_task, ehea_reset_port); 3143 INIT_WORK(&port->reset_task, ehea_reset_port);
3145 ehea_set_ethtool_ops(dev);
3146 3144
3147 ret = register_netdev(dev); 3145 ret = register_netdev(dev);
3148 if (ret) { 3146 if (ret) {
diff --git a/drivers/net/ehea/ehea_qmr.c b/drivers/net/ehea/ehea_qmr.c
index 49d766ebbcf4..3747457f5e69 100644
--- a/drivers/net/ehea/ehea_qmr.c
+++ b/drivers/net/ehea/ehea_qmr.c
@@ -1005,7 +1005,7 @@ void ehea_error_data(struct ehea_adapter *adapter, u64 res_handle)
1005 unsigned long ret; 1005 unsigned long ret;
1006 u64 *rblock; 1006 u64 *rblock;
1007 1007
1008 rblock = kzalloc(PAGE_SIZE, GFP_KERNEL); 1008 rblock = (void *)get_zeroed_page(GFP_KERNEL);
1009 if (!rblock) { 1009 if (!rblock) {
1010 ehea_error("Cannot allocate rblock memory."); 1010 ehea_error("Cannot allocate rblock memory.");
1011 return; 1011 return;
@@ -1022,5 +1022,5 @@ void ehea_error_data(struct ehea_adapter *adapter, u64 res_handle)
1022 else 1022 else
1023 ehea_error("Error data could not be fetched: %llX", res_handle); 1023 ehea_error("Error data could not be fetched: %llX", res_handle);
1024 1024
1025 kfree(rblock); 1025 free_page((unsigned long)rblock);
1026} 1026}
diff --git a/drivers/net/enic/enic_main.c b/drivers/net/enic/enic_main.c
index 7d60551d538f..4617956821cd 100644
--- a/drivers/net/enic/enic_main.c
+++ b/drivers/net/enic/enic_main.c
@@ -411,8 +411,8 @@ static irqreturn_t enic_isr_legacy(int irq, void *data)
411 } 411 }
412 412
413 if (ENIC_TEST_INTR(pba, ENIC_INTX_WQ_RQ)) { 413 if (ENIC_TEST_INTR(pba, ENIC_INTX_WQ_RQ)) {
414 if (netif_rx_schedule_prep(&enic->napi)) 414 if (napi_schedule_prep(&enic->napi))
415 __netif_rx_schedule(&enic->napi); 415 __napi_schedule(&enic->napi);
416 } else { 416 } else {
417 vnic_intr_unmask(&enic->intr[ENIC_INTX_WQ_RQ]); 417 vnic_intr_unmask(&enic->intr[ENIC_INTX_WQ_RQ]);
418 } 418 }
@@ -440,7 +440,7 @@ static irqreturn_t enic_isr_msi(int irq, void *data)
440 * writes). 440 * writes).
441 */ 441 */
442 442
443 netif_rx_schedule(&enic->napi); 443 napi_schedule(&enic->napi);
444 444
445 return IRQ_HANDLED; 445 return IRQ_HANDLED;
446} 446}
@@ -450,7 +450,7 @@ static irqreturn_t enic_isr_msix_rq(int irq, void *data)
450 struct enic *enic = data; 450 struct enic *enic = data;
451 451
452 /* schedule NAPI polling for RQ cleanup */ 452 /* schedule NAPI polling for RQ cleanup */
453 netif_rx_schedule(&enic->napi); 453 napi_schedule(&enic->napi);
454 454
455 return IRQ_HANDLED; 455 return IRQ_HANDLED;
456} 456}
@@ -1068,7 +1068,7 @@ static int enic_poll(struct napi_struct *napi, int budget)
1068 if (netdev->features & NETIF_F_LRO) 1068 if (netdev->features & NETIF_F_LRO)
1069 lro_flush_all(&enic->lro_mgr); 1069 lro_flush_all(&enic->lro_mgr);
1070 1070
1071 netif_rx_complete(napi); 1071 napi_complete(napi);
1072 vnic_intr_unmask(&enic->intr[ENIC_MSIX_RQ]); 1072 vnic_intr_unmask(&enic->intr[ENIC_MSIX_RQ]);
1073 } 1073 }
1074 1074
@@ -1112,7 +1112,7 @@ static int enic_poll_msix(struct napi_struct *napi, int budget)
1112 if (netdev->features & NETIF_F_LRO) 1112 if (netdev->features & NETIF_F_LRO)
1113 lro_flush_all(&enic->lro_mgr); 1113 lro_flush_all(&enic->lro_mgr);
1114 1114
1115 netif_rx_complete(napi); 1115 napi_complete(napi);
1116 vnic_intr_unmask(&enic->intr[ENIC_MSIX_RQ]); 1116 vnic_intr_unmask(&enic->intr[ENIC_MSIX_RQ]);
1117 } 1117 }
1118 1118
diff --git a/drivers/net/epic100.c b/drivers/net/epic100.c
index a539bc3163cf..b60e27dfcfa7 100644
--- a/drivers/net/epic100.c
+++ b/drivers/net/epic100.c
@@ -1114,9 +1114,9 @@ static irqreturn_t epic_interrupt(int irq, void *dev_instance)
1114 1114
1115 if ((status & EpicNapiEvent) && !ep->reschedule_in_poll) { 1115 if ((status & EpicNapiEvent) && !ep->reschedule_in_poll) {
1116 spin_lock(&ep->napi_lock); 1116 spin_lock(&ep->napi_lock);
1117 if (netif_rx_schedule_prep(&ep->napi)) { 1117 if (napi_schedule_prep(&ep->napi)) {
1118 epic_napi_irq_off(dev, ep); 1118 epic_napi_irq_off(dev, ep);
1119 __netif_rx_schedule(&ep->napi); 1119 __napi_schedule(&ep->napi);
1120 } else 1120 } else
1121 ep->reschedule_in_poll++; 1121 ep->reschedule_in_poll++;
1122 spin_unlock(&ep->napi_lock); 1122 spin_unlock(&ep->napi_lock);
@@ -1293,7 +1293,7 @@ rx_action:
1293 1293
1294 more = ep->reschedule_in_poll; 1294 more = ep->reschedule_in_poll;
1295 if (!more) { 1295 if (!more) {
1296 __netif_rx_complete(napi); 1296 __napi_complete(napi);
1297 outl(EpicNapiEvent, ioaddr + INTSTAT); 1297 outl(EpicNapiEvent, ioaddr + INTSTAT);
1298 epic_napi_irq_on(dev, ep); 1298 epic_napi_irq_on(dev, ep);
1299 } else 1299 } else
diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c
index 5b910cf63740..875509d7d86b 100644
--- a/drivers/net/forcedeth.c
+++ b/drivers/net/forcedeth.c
@@ -1760,7 +1760,7 @@ static void nv_do_rx_refill(unsigned long data)
1760 struct fe_priv *np = netdev_priv(dev); 1760 struct fe_priv *np = netdev_priv(dev);
1761 1761
1762 /* Just reschedule NAPI rx processing */ 1762 /* Just reschedule NAPI rx processing */
1763 netif_rx_schedule(&np->napi); 1763 napi_schedule(&np->napi);
1764} 1764}
1765#else 1765#else
1766static void nv_do_rx_refill(unsigned long data) 1766static void nv_do_rx_refill(unsigned long data)
@@ -3406,7 +3406,7 @@ static irqreturn_t nv_nic_irq(int foo, void *data)
3406#ifdef CONFIG_FORCEDETH_NAPI 3406#ifdef CONFIG_FORCEDETH_NAPI
3407 if (events & NVREG_IRQ_RX_ALL) { 3407 if (events & NVREG_IRQ_RX_ALL) {
3408 spin_lock(&np->lock); 3408 spin_lock(&np->lock);
3409 netif_rx_schedule(&np->napi); 3409 napi_schedule(&np->napi);
3410 3410
3411 /* Disable furthur receive irq's */ 3411 /* Disable furthur receive irq's */
3412 np->irqmask &= ~NVREG_IRQ_RX_ALL; 3412 np->irqmask &= ~NVREG_IRQ_RX_ALL;
@@ -3523,7 +3523,7 @@ static irqreturn_t nv_nic_irq_optimized(int foo, void *data)
3523#ifdef CONFIG_FORCEDETH_NAPI 3523#ifdef CONFIG_FORCEDETH_NAPI
3524 if (events & NVREG_IRQ_RX_ALL) { 3524 if (events & NVREG_IRQ_RX_ALL) {
3525 spin_lock(&np->lock); 3525 spin_lock(&np->lock);
3526 netif_rx_schedule(&np->napi); 3526 napi_schedule(&np->napi);
3527 3527
3528 /* Disable furthur receive irq's */ 3528 /* Disable furthur receive irq's */
3529 np->irqmask &= ~NVREG_IRQ_RX_ALL; 3529 np->irqmask &= ~NVREG_IRQ_RX_ALL;
@@ -3680,7 +3680,7 @@ static int nv_napi_poll(struct napi_struct *napi, int budget)
3680 /* re-enable receive interrupts */ 3680 /* re-enable receive interrupts */
3681 spin_lock_irqsave(&np->lock, flags); 3681 spin_lock_irqsave(&np->lock, flags);
3682 3682
3683 __netif_rx_complete(napi); 3683 __napi_complete(napi);
3684 3684
3685 np->irqmask |= NVREG_IRQ_RX_ALL; 3685 np->irqmask |= NVREG_IRQ_RX_ALL;
3686 if (np->msi_flags & NV_MSI_X_ENABLED) 3686 if (np->msi_flags & NV_MSI_X_ENABLED)
@@ -3706,7 +3706,7 @@ static irqreturn_t nv_nic_irq_rx(int foo, void *data)
3706 writel(NVREG_IRQ_RX_ALL, base + NvRegMSIXIrqStatus); 3706 writel(NVREG_IRQ_RX_ALL, base + NvRegMSIXIrqStatus);
3707 3707
3708 if (events) { 3708 if (events) {
3709 netif_rx_schedule(&np->napi); 3709 napi_schedule(&np->napi);
3710 /* disable receive interrupts on the nic */ 3710 /* disable receive interrupts on the nic */
3711 writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask); 3711 writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask);
3712 pci_push(base); 3712 pci_push(base);
diff --git a/drivers/net/fs_enet/fs_enet-main.c b/drivers/net/fs_enet/fs_enet-main.c
index ce900e54d8d1..b037ce9857bf 100644
--- a/drivers/net/fs_enet/fs_enet-main.c
+++ b/drivers/net/fs_enet/fs_enet-main.c
@@ -209,7 +209,7 @@ static int fs_enet_rx_napi(struct napi_struct *napi, int budget)
209 209
210 if (received < budget) { 210 if (received < budget) {
211 /* done */ 211 /* done */
212 netif_rx_complete(napi); 212 napi_complete(napi);
213 (*fep->ops->napi_enable_rx)(dev); 213 (*fep->ops->napi_enable_rx)(dev);
214 } 214 }
215 return received; 215 return received;
@@ -478,7 +478,7 @@ fs_enet_interrupt(int irq, void *dev_id)
478 /* NOTE: it is possible for FCCs in NAPI mode */ 478 /* NOTE: it is possible for FCCs in NAPI mode */
479 /* to submit a spurious interrupt while in poll */ 479 /* to submit a spurious interrupt while in poll */
480 if (napi_ok) 480 if (napi_ok)
481 __netif_rx_schedule(&fep->napi); 481 __napi_schedule(&fep->napi);
482 } 482 }
483 } 483 }
484 484
diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c
index 3f7eab42aef1..f5e6068f9b07 100644
--- a/drivers/net/gianfar.c
+++ b/drivers/net/gianfar.c
@@ -1623,9 +1623,9 @@ static void gfar_schedule_cleanup(struct net_device *dev)
1623 spin_lock_irqsave(&priv->txlock, flags); 1623 spin_lock_irqsave(&priv->txlock, flags);
1624 spin_lock(&priv->rxlock); 1624 spin_lock(&priv->rxlock);
1625 1625
1626 if (netif_rx_schedule_prep(&priv->napi)) { 1626 if (napi_schedule_prep(&priv->napi)) {
1627 gfar_write(&priv->regs->imask, IMASK_RTX_DISABLED); 1627 gfar_write(&priv->regs->imask, IMASK_RTX_DISABLED);
1628 __netif_rx_schedule(&priv->napi); 1628 __napi_schedule(&priv->napi);
1629 } 1629 }
1630 1630
1631 spin_unlock(&priv->rxlock); 1631 spin_unlock(&priv->rxlock);
@@ -1882,7 +1882,7 @@ static int gfar_poll(struct napi_struct *napi, int budget)
1882 return budget; 1882 return budget;
1883 1883
1884 if (rx_cleaned < budget) { 1884 if (rx_cleaned < budget) {
1885 netif_rx_complete(napi); 1885 napi_complete(napi);
1886 1886
1887 /* Clear the halt bit in RSTAT */ 1887 /* Clear the halt bit in RSTAT */
1888 gfar_write(&priv->regs->rstat, RSTAT_CLEAR_RHALT); 1888 gfar_write(&priv->regs->rstat, RSTAT_CLEAR_RHALT);
diff --git a/drivers/net/hamradio/6pack.c b/drivers/net/hamradio/6pack.c
index 2d4089894ec7..3da9f394b4c6 100644
--- a/drivers/net/hamradio/6pack.c
+++ b/drivers/net/hamradio/6pack.c
@@ -322,23 +322,25 @@ static const struct header_ops sp_header_ops = {
322 .rebuild = sp_rebuild_header, 322 .rebuild = sp_rebuild_header,
323}; 323};
324 324
325static const struct net_device_ops sp_netdev_ops = {
326 .ndo_open = sp_open_dev,
327 .ndo_stop = sp_close,
328 .ndo_start_xmit = sp_xmit,
329 .ndo_set_mac_address = sp_set_mac_address,
330};
331
325static void sp_setup(struct net_device *dev) 332static void sp_setup(struct net_device *dev)
326{ 333{
327 /* Finish setting up the DEVICE info. */ 334 /* Finish setting up the DEVICE info. */
328 dev->mtu = SIXP_MTU; 335 dev->netdev_ops = &sp_netdev_ops;
329 dev->hard_start_xmit = sp_xmit;
330 dev->open = sp_open_dev;
331 dev->destructor = free_netdev; 336 dev->destructor = free_netdev;
332 dev->stop = sp_close; 337 dev->mtu = SIXP_MTU;
333
334 dev->set_mac_address = sp_set_mac_address;
335 dev->hard_header_len = AX25_MAX_HEADER_LEN; 338 dev->hard_header_len = AX25_MAX_HEADER_LEN;
336 dev->header_ops = &sp_header_ops; 339 dev->header_ops = &sp_header_ops;
337 340
338 dev->addr_len = AX25_ADDR_LEN; 341 dev->addr_len = AX25_ADDR_LEN;
339 dev->type = ARPHRD_AX25; 342 dev->type = ARPHRD_AX25;
340 dev->tx_queue_len = 10; 343 dev->tx_queue_len = 10;
341 dev->tx_timeout = NULL;
342 344
343 /* Only activated in AX.25 mode */ 345 /* Only activated in AX.25 mode */
344 memcpy(dev->broadcast, &ax25_bcast, AX25_ADDR_LEN); 346 memcpy(dev->broadcast, &ax25_bcast, AX25_ADDR_LEN);
diff --git a/drivers/net/hamradio/baycom_epp.c b/drivers/net/hamradio/baycom_epp.c
index 81a65e3a1c05..bb78c11559cd 100644
--- a/drivers/net/hamradio/baycom_epp.c
+++ b/drivers/net/hamradio/baycom_epp.c
@@ -203,7 +203,6 @@ struct baycom_state {
203 unsigned char buf[TXBUFFER_SIZE]; 203 unsigned char buf[TXBUFFER_SIZE];
204 } hdlctx; 204 } hdlctx;
205 205
206 struct net_device_stats stats;
207 unsigned int ptt_keyed; 206 unsigned int ptt_keyed;
208 struct sk_buff *skb; /* next transmit packet */ 207 struct sk_buff *skb; /* next transmit packet */
209 208
@@ -423,7 +422,7 @@ static void encode_hdlc(struct baycom_state *bc)
423 bc->hdlctx.bufptr = bc->hdlctx.buf; 422 bc->hdlctx.bufptr = bc->hdlctx.buf;
424 bc->hdlctx.bufcnt = wp - bc->hdlctx.buf; 423 bc->hdlctx.bufcnt = wp - bc->hdlctx.buf;
425 dev_kfree_skb(skb); 424 dev_kfree_skb(skb);
426 bc->stats.tx_packets++; 425 bc->dev->stats.tx_packets++;
427} 426}
428 427
429/* ---------------------------------------------------------------------- */ 428/* ---------------------------------------------------------------------- */
@@ -547,7 +546,7 @@ static void do_rxpacket(struct net_device *dev)
547 pktlen = bc->hdlcrx.bufcnt-2+1; /* KISS kludge */ 546 pktlen = bc->hdlcrx.bufcnt-2+1; /* KISS kludge */
548 if (!(skb = dev_alloc_skb(pktlen))) { 547 if (!(skb = dev_alloc_skb(pktlen))) {
549 printk("%s: memory squeeze, dropping packet\n", dev->name); 548 printk("%s: memory squeeze, dropping packet\n", dev->name);
550 bc->stats.rx_dropped++; 549 dev->stats.rx_dropped++;
551 return; 550 return;
552 } 551 }
553 cp = skb_put(skb, pktlen); 552 cp = skb_put(skb, pktlen);
@@ -555,7 +554,7 @@ static void do_rxpacket(struct net_device *dev)
555 memcpy(cp, bc->hdlcrx.buf, pktlen - 1); 554 memcpy(cp, bc->hdlcrx.buf, pktlen - 1);
556 skb->protocol = ax25_type_trans(skb, dev); 555 skb->protocol = ax25_type_trans(skb, dev);
557 netif_rx(skb); 556 netif_rx(skb);
558 bc->stats.rx_packets++; 557 dev->stats.rx_packets++;
559} 558}
560 559
561static int receive(struct net_device *dev, int cnt) 560static int receive(struct net_device *dev, int cnt)
@@ -802,19 +801,6 @@ static int baycom_set_mac_address(struct net_device *dev, void *addr)
802 801
803/* --------------------------------------------------------------------- */ 802/* --------------------------------------------------------------------- */
804 803
805static struct net_device_stats *baycom_get_stats(struct net_device *dev)
806{
807 struct baycom_state *bc = netdev_priv(dev);
808
809 /*
810 * Get the current statistics. This may be called with the
811 * card open or closed.
812 */
813 return &bc->stats;
814}
815
816/* --------------------------------------------------------------------- */
817
818static void epp_wakeup(void *handle) 804static void epp_wakeup(void *handle)
819{ 805{
820 struct net_device *dev = (struct net_device *)handle; 806 struct net_device *dev = (struct net_device *)handle;
@@ -1065,10 +1051,10 @@ static int baycom_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
1065 hi.data.cs.ptt = !!(bc->stat & EPP_PTTBIT); 1051 hi.data.cs.ptt = !!(bc->stat & EPP_PTTBIT);
1066 hi.data.cs.dcd = !(bc->stat & EPP_DCDBIT); 1052 hi.data.cs.dcd = !(bc->stat & EPP_DCDBIT);
1067 hi.data.cs.ptt_keyed = bc->ptt_keyed; 1053 hi.data.cs.ptt_keyed = bc->ptt_keyed;
1068 hi.data.cs.tx_packets = bc->stats.tx_packets; 1054 hi.data.cs.tx_packets = dev->stats.tx_packets;
1069 hi.data.cs.tx_errors = bc->stats.tx_errors; 1055 hi.data.cs.tx_errors = dev->stats.tx_errors;
1070 hi.data.cs.rx_packets = bc->stats.rx_packets; 1056 hi.data.cs.rx_packets = dev->stats.rx_packets;
1071 hi.data.cs.rx_errors = bc->stats.rx_errors; 1057 hi.data.cs.rx_errors = dev->stats.rx_errors;
1072 break; 1058 break;
1073 1059
1074 case HDLCDRVCTL_OLDGETSTAT: 1060 case HDLCDRVCTL_OLDGETSTAT:
@@ -1116,6 +1102,14 @@ static int baycom_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
1116 1102
1117/* --------------------------------------------------------------------- */ 1103/* --------------------------------------------------------------------- */
1118 1104
1105static const struct net_device_ops baycom_netdev_ops = {
1106 .ndo_open = epp_open,
1107 .ndo_stop = epp_close,
1108 .ndo_do_ioctl = baycom_ioctl,
1109 .ndo_start_xmit = baycom_send_packet,
1110 .ndo_set_mac_address = baycom_set_mac_address,
1111};
1112
1119/* 1113/*
1120 * Check for a network adaptor of this type, and return '0' if one exists. 1114 * Check for a network adaptor of this type, and return '0' if one exists.
1121 * If dev->base_addr == 0, probe all likely locations. 1115 * If dev->base_addr == 0, probe all likely locations.
@@ -1143,17 +1137,12 @@ static void baycom_probe(struct net_device *dev)
1143 /* 1137 /*
1144 * initialize the device struct 1138 * initialize the device struct
1145 */ 1139 */
1146 dev->open = epp_open;
1147 dev->stop = epp_close;
1148 dev->do_ioctl = baycom_ioctl;
1149 dev->hard_start_xmit = baycom_send_packet;
1150 dev->get_stats = baycom_get_stats;
1151 1140
1152 /* Fill in the fields of the device structure */ 1141 /* Fill in the fields of the device structure */
1153 bc->skb = NULL; 1142 bc->skb = NULL;
1154 1143
1144 dev->netdev_ops = &baycom_netdev_ops;
1155 dev->header_ops = &ax25_header_ops; 1145 dev->header_ops = &ax25_header_ops;
1156 dev->set_mac_address = baycom_set_mac_address;
1157 1146
1158 dev->type = ARPHRD_AX25; /* AF_AX25 device */ 1147 dev->type = ARPHRD_AX25; /* AF_AX25 device */
1159 dev->hard_header_len = AX25_MAX_HEADER_LEN + AX25_BPQ_HEADER_LEN; 1148 dev->hard_header_len = AX25_MAX_HEADER_LEN + AX25_BPQ_HEADER_LEN;
diff --git a/drivers/net/hamradio/bpqether.c b/drivers/net/hamradio/bpqether.c
index 46f8f3390e7d..1f65d1edf132 100644
--- a/drivers/net/hamradio/bpqether.c
+++ b/drivers/net/hamradio/bpqether.c
@@ -110,7 +110,6 @@ struct bpqdev {
110 struct list_head bpq_list; /* list of bpq devices chain */ 110 struct list_head bpq_list; /* list of bpq devices chain */
111 struct net_device *ethdev; /* link to ethernet device */ 111 struct net_device *ethdev; /* link to ethernet device */
112 struct net_device *axdev; /* bpq device (bpq#) */ 112 struct net_device *axdev; /* bpq device (bpq#) */
113 struct net_device_stats stats; /* some statistics */
114 char dest_addr[6]; /* ether destination address */ 113 char dest_addr[6]; /* ether destination address */
115 char acpt_addr[6]; /* accept ether frames from this address only */ 114 char acpt_addr[6]; /* accept ether frames from this address only */
116}; 115};
@@ -222,8 +221,8 @@ static int bpq_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_ty
222 skb_pull(skb, 2); /* Remove the length bytes */ 221 skb_pull(skb, 2); /* Remove the length bytes */
223 skb_trim(skb, len); /* Set the length of the data */ 222 skb_trim(skb, len); /* Set the length of the data */
224 223
225 bpq->stats.rx_packets++; 224 dev->stats.rx_packets++;
226 bpq->stats.rx_bytes += len; 225 dev->stats.rx_bytes += len;
227 226
228 ptr = skb_push(skb, 1); 227 ptr = skb_push(skb, 1);
229 *ptr = 0; 228 *ptr = 0;
@@ -292,7 +291,7 @@ static int bpq_xmit(struct sk_buff *skb, struct net_device *dev)
292 bpq = netdev_priv(dev); 291 bpq = netdev_priv(dev);
293 292
294 if ((dev = bpq_get_ether_dev(dev)) == NULL) { 293 if ((dev = bpq_get_ether_dev(dev)) == NULL) {
295 bpq->stats.tx_dropped++; 294 dev->stats.tx_dropped++;
296 kfree_skb(skb); 295 kfree_skb(skb);
297 return -ENODEV; 296 return -ENODEV;
298 } 297 }
@@ -300,8 +299,8 @@ static int bpq_xmit(struct sk_buff *skb, struct net_device *dev)
300 skb->protocol = ax25_type_trans(skb, dev); 299 skb->protocol = ax25_type_trans(skb, dev);
301 skb_reset_network_header(skb); 300 skb_reset_network_header(skb);
302 dev_hard_header(skb, dev, ETH_P_BPQ, bpq->dest_addr, NULL, 0); 301 dev_hard_header(skb, dev, ETH_P_BPQ, bpq->dest_addr, NULL, 0);
303 bpq->stats.tx_packets++; 302 dev->stats.tx_packets++;
304 bpq->stats.tx_bytes+=skb->len; 303 dev->stats.tx_bytes+=skb->len;
305 304
306 dev_queue_xmit(skb); 305 dev_queue_xmit(skb);
307 netif_wake_queue(dev); 306 netif_wake_queue(dev);
@@ -309,16 +308,6 @@ static int bpq_xmit(struct sk_buff *skb, struct net_device *dev)
309} 308}
310 309
311/* 310/*
312 * Statistics
313 */
314static struct net_device_stats *bpq_get_stats(struct net_device *dev)
315{
316 struct bpqdev *bpq = netdev_priv(dev);
317
318 return &bpq->stats;
319}
320
321/*
322 * Set AX.25 callsign 311 * Set AX.25 callsign
323 */ 312 */
324static int bpq_set_mac_address(struct net_device *dev, void *addr) 313static int bpq_set_mac_address(struct net_device *dev, void *addr)
@@ -454,7 +443,7 @@ static int bpq_seq_show(struct seq_file *seq, void *v)
454 return 0; 443 return 0;
455} 444}
456 445
457static struct seq_operations bpq_seqops = { 446static const struct seq_operations bpq_seqops = {
458 .start = bpq_seq_start, 447 .start = bpq_seq_start,
459 .next = bpq_seq_next, 448 .next = bpq_seq_next,
460 .stop = bpq_seq_stop, 449 .stop = bpq_seq_stop,
@@ -477,16 +466,17 @@ static const struct file_operations bpq_info_fops = {
477 466
478/* ------------------------------------------------------------------------ */ 467/* ------------------------------------------------------------------------ */
479 468
469static const struct net_device_ops bpq_netdev_ops = {
470 .ndo_open = bpq_open,
471 .ndo_stop = bpq_close,
472 .ndo_start_xmit = bpq_xmit,
473 .ndo_set_mac_address = bpq_set_mac_address,
474 .ndo_do_ioctl = bpq_ioctl,
475};
480 476
481static void bpq_setup(struct net_device *dev) 477static void bpq_setup(struct net_device *dev)
482{ 478{
483 479 dev->netdev_ops = &bpq_netdev_ops;
484 dev->hard_start_xmit = bpq_xmit;
485 dev->open = bpq_open;
486 dev->stop = bpq_close;
487 dev->set_mac_address = bpq_set_mac_address;
488 dev->get_stats = bpq_get_stats;
489 dev->do_ioctl = bpq_ioctl;
490 dev->destructor = free_netdev; 480 dev->destructor = free_netdev;
491 481
492 memcpy(dev->broadcast, &ax25_bcast, AX25_ADDR_LEN); 482 memcpy(dev->broadcast, &ax25_bcast, AX25_ADDR_LEN);
diff --git a/drivers/net/hamradio/dmascc.c b/drivers/net/hamradio/dmascc.c
index e67103396ed7..881bf818bb48 100644
--- a/drivers/net/hamradio/dmascc.c
+++ b/drivers/net/hamradio/dmascc.c
@@ -195,7 +195,7 @@ struct scc_priv {
195 int chip; 195 int chip;
196 struct net_device *dev; 196 struct net_device *dev;
197 struct scc_info *info; 197 struct scc_info *info;
198 struct net_device_stats stats; 198
199 int channel; 199 int channel;
200 int card_base, scc_cmd, scc_data; 200 int card_base, scc_cmd, scc_data;
201 int tmr_cnt, tmr_ctrl, tmr_mode; 201 int tmr_cnt, tmr_ctrl, tmr_mode;
@@ -239,7 +239,6 @@ static int scc_open(struct net_device *dev);
239static int scc_close(struct net_device *dev); 239static int scc_close(struct net_device *dev);
240static int scc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd); 240static int scc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
241static int scc_send_packet(struct sk_buff *skb, struct net_device *dev); 241static int scc_send_packet(struct sk_buff *skb, struct net_device *dev);
242static struct net_device_stats *scc_get_stats(struct net_device *dev);
243static int scc_set_mac_address(struct net_device *dev, void *sa); 242static int scc_set_mac_address(struct net_device *dev, void *sa);
244 243
245static inline void tx_on(struct scc_priv *priv); 244static inline void tx_on(struct scc_priv *priv);
@@ -441,6 +440,13 @@ static void __init dev_setup(struct net_device *dev)
441 memcpy(dev->dev_addr, &ax25_defaddr, AX25_ADDR_LEN); 440 memcpy(dev->dev_addr, &ax25_defaddr, AX25_ADDR_LEN);
442} 441}
443 442
443static const struct net_device_ops scc_netdev_ops = {
444 .ndo_open = scc_open,
445 .ndo_stop = scc_close,
446 .ndo_start_xmit = scc_send_packet,
447 .ndo_do_ioctl = scc_ioctl,
448};
449
444static int __init setup_adapter(int card_base, int type, int n) 450static int __init setup_adapter(int card_base, int type, int n)
445{ 451{
446 int i, irq, chip; 452 int i, irq, chip;
@@ -576,11 +582,7 @@ static int __init setup_adapter(int card_base, int type, int n)
576 sprintf(dev->name, "dmascc%i", 2 * n + i); 582 sprintf(dev->name, "dmascc%i", 2 * n + i);
577 dev->base_addr = card_base; 583 dev->base_addr = card_base;
578 dev->irq = irq; 584 dev->irq = irq;
579 dev->open = scc_open; 585 dev->netdev_ops = &scc_netdev_ops;
580 dev->stop = scc_close;
581 dev->do_ioctl = scc_ioctl;
582 dev->hard_start_xmit = scc_send_packet;
583 dev->get_stats = scc_get_stats;
584 dev->header_ops = &ax25_header_ops; 586 dev->header_ops = &ax25_header_ops;
585 dev->set_mac_address = scc_set_mac_address; 587 dev->set_mac_address = scc_set_mac_address;
586 } 588 }
@@ -961,14 +963,6 @@ static int scc_send_packet(struct sk_buff *skb, struct net_device *dev)
961} 963}
962 964
963 965
964static struct net_device_stats *scc_get_stats(struct net_device *dev)
965{
966 struct scc_priv *priv = dev->ml_priv;
967
968 return &priv->stats;
969}
970
971
972static int scc_set_mac_address(struct net_device *dev, void *sa) 966static int scc_set_mac_address(struct net_device *dev, void *sa)
973{ 967{
974 memcpy(dev->dev_addr, ((struct sockaddr *) sa)->sa_data, 968 memcpy(dev->dev_addr, ((struct sockaddr *) sa)->sa_data,
@@ -1216,17 +1210,17 @@ static void special_condition(struct scc_priv *priv, int rc)
1216 } 1210 }
1217 if (priv->rx_over) { 1211 if (priv->rx_over) {
1218 /* We had an overrun */ 1212 /* We had an overrun */
1219 priv->stats.rx_errors++; 1213 priv->dev->stats.rx_errors++;
1220 if (priv->rx_over == 2) 1214 if (priv->rx_over == 2)
1221 priv->stats.rx_length_errors++; 1215 priv->dev->stats.rx_length_errors++;
1222 else 1216 else
1223 priv->stats.rx_fifo_errors++; 1217 priv->dev->stats.rx_fifo_errors++;
1224 priv->rx_over = 0; 1218 priv->rx_over = 0;
1225 } else if (rc & CRC_ERR) { 1219 } else if (rc & CRC_ERR) {
1226 /* Count invalid CRC only if packet length >= minimum */ 1220 /* Count invalid CRC only if packet length >= minimum */
1227 if (cb >= 15) { 1221 if (cb >= 15) {
1228 priv->stats.rx_errors++; 1222 priv->dev->stats.rx_errors++;
1229 priv->stats.rx_crc_errors++; 1223 priv->dev->stats.rx_crc_errors++;
1230 } 1224 }
1231 } else { 1225 } else {
1232 if (cb >= 15) { 1226 if (cb >= 15) {
@@ -1239,8 +1233,8 @@ static void special_condition(struct scc_priv *priv, int rc)
1239 priv->rx_count++; 1233 priv->rx_count++;
1240 schedule_work(&priv->rx_work); 1234 schedule_work(&priv->rx_work);
1241 } else { 1235 } else {
1242 priv->stats.rx_errors++; 1236 priv->dev->stats.rx_errors++;
1243 priv->stats.rx_over_errors++; 1237 priv->dev->stats.rx_over_errors++;
1244 } 1238 }
1245 } 1239 }
1246 } 1240 }
@@ -1275,7 +1269,7 @@ static void rx_bh(struct work_struct *ugli_api)
1275 skb = dev_alloc_skb(cb + 1); 1269 skb = dev_alloc_skb(cb + 1);
1276 if (skb == NULL) { 1270 if (skb == NULL) {
1277 /* Drop packet */ 1271 /* Drop packet */
1278 priv->stats.rx_dropped++; 1272 priv->dev->stats.rx_dropped++;
1279 } else { 1273 } else {
1280 /* Fill buffer */ 1274 /* Fill buffer */
1281 data = skb_put(skb, cb + 1); 1275 data = skb_put(skb, cb + 1);
@@ -1283,8 +1277,8 @@ static void rx_bh(struct work_struct *ugli_api)
1283 memcpy(&data[1], priv->rx_buf[i], cb); 1277 memcpy(&data[1], priv->rx_buf[i], cb);
1284 skb->protocol = ax25_type_trans(skb, priv->dev); 1278 skb->protocol = ax25_type_trans(skb, priv->dev);
1285 netif_rx(skb); 1279 netif_rx(skb);
1286 priv->stats.rx_packets++; 1280 priv->dev->stats.rx_packets++;
1287 priv->stats.rx_bytes += cb; 1281 priv->dev->stats.rx_bytes += cb;
1288 } 1282 }
1289 spin_lock_irqsave(&priv->ring_lock, flags); 1283 spin_lock_irqsave(&priv->ring_lock, flags);
1290 /* Move tail */ 1284 /* Move tail */
@@ -1351,15 +1345,15 @@ static void es_isr(struct scc_priv *priv)
1351 write_scc(priv, R1, EXT_INT_ENAB | WT_FN_RDYFN); 1345 write_scc(priv, R1, EXT_INT_ENAB | WT_FN_RDYFN);
1352 if (res) { 1346 if (res) {
1353 /* Update packet statistics */ 1347 /* Update packet statistics */
1354 priv->stats.tx_errors++; 1348 priv->dev->stats.tx_errors++;
1355 priv->stats.tx_fifo_errors++; 1349 priv->dev->stats.tx_fifo_errors++;
1356 /* Other underrun interrupts may already be waiting */ 1350 /* Other underrun interrupts may already be waiting */
1357 write_scc(priv, R0, RES_EXT_INT); 1351 write_scc(priv, R0, RES_EXT_INT);
1358 write_scc(priv, R0, RES_EXT_INT); 1352 write_scc(priv, R0, RES_EXT_INT);
1359 } else { 1353 } else {
1360 /* Update packet statistics */ 1354 /* Update packet statistics */
1361 priv->stats.tx_packets++; 1355 priv->dev->stats.tx_packets++;
1362 priv->stats.tx_bytes += priv->tx_len[i]; 1356 priv->dev->stats.tx_bytes += priv->tx_len[i];
1363 /* Remove frame from FIFO */ 1357 /* Remove frame from FIFO */
1364 priv->tx_tail = (i + 1) % NUM_TX_BUF; 1358 priv->tx_tail = (i + 1) % NUM_TX_BUF;
1365 priv->tx_count--; 1359 priv->tx_count--;
@@ -1425,7 +1419,7 @@ static void tm_isr(struct scc_priv *priv)
1425 write_scc(priv, R15, DCDIE); 1419 write_scc(priv, R15, DCDIE);
1426 priv->rr0 = read_scc(priv, R0); 1420 priv->rr0 = read_scc(priv, R0);
1427 if (priv->rr0 & DCD) { 1421 if (priv->rr0 & DCD) {
1428 priv->stats.collisions++; 1422 priv->dev->stats.collisions++;
1429 rx_on(priv); 1423 rx_on(priv);
1430 priv->state = RX_ON; 1424 priv->state = RX_ON;
1431 } else { 1425 } else {
diff --git a/drivers/net/hamradio/hdlcdrv.c b/drivers/net/hamradio/hdlcdrv.c
index 8eba61a1d4ab..61de56e45eed 100644
--- a/drivers/net/hamradio/hdlcdrv.c
+++ b/drivers/net/hamradio/hdlcdrv.c
@@ -154,7 +154,7 @@ static void hdlc_rx_flag(struct net_device *dev, struct hdlcdrv_state *s)
154 pkt_len = s->hdlcrx.len - 2 + 1; /* KISS kludge */ 154 pkt_len = s->hdlcrx.len - 2 + 1; /* KISS kludge */
155 if (!(skb = dev_alloc_skb(pkt_len))) { 155 if (!(skb = dev_alloc_skb(pkt_len))) {
156 printk("%s: memory squeeze, dropping packet\n", dev->name); 156 printk("%s: memory squeeze, dropping packet\n", dev->name);
157 s->stats.rx_dropped++; 157 dev->stats.rx_dropped++;
158 return; 158 return;
159 } 159 }
160 cp = skb_put(skb, pkt_len); 160 cp = skb_put(skb, pkt_len);
@@ -162,7 +162,7 @@ static void hdlc_rx_flag(struct net_device *dev, struct hdlcdrv_state *s)
162 memcpy(cp, s->hdlcrx.buffer, pkt_len - 1); 162 memcpy(cp, s->hdlcrx.buffer, pkt_len - 1);
163 skb->protocol = ax25_type_trans(skb, dev); 163 skb->protocol = ax25_type_trans(skb, dev);
164 netif_rx(skb); 164 netif_rx(skb);
165 s->stats.rx_packets++; 165 dev->stats.rx_packets++;
166} 166}
167 167
168void hdlcdrv_receiver(struct net_device *dev, struct hdlcdrv_state *s) 168void hdlcdrv_receiver(struct net_device *dev, struct hdlcdrv_state *s)
@@ -326,7 +326,7 @@ void hdlcdrv_transmitter(struct net_device *dev, struct hdlcdrv_state *s)
326 s->hdlctx.len = pkt_len+2; /* the appended CRC */ 326 s->hdlctx.len = pkt_len+2; /* the appended CRC */
327 s->hdlctx.tx_state = 2; 327 s->hdlctx.tx_state = 2;
328 s->hdlctx.bitstream = 0; 328 s->hdlctx.bitstream = 0;
329 s->stats.tx_packets++; 329 dev->stats.tx_packets++;
330 break; 330 break;
331 case 2: 331 case 2:
332 if (!s->hdlctx.len) { 332 if (!s->hdlctx.len) {
@@ -427,19 +427,6 @@ static int hdlcdrv_set_mac_address(struct net_device *dev, void *addr)
427} 427}
428 428
429/* --------------------------------------------------------------------- */ 429/* --------------------------------------------------------------------- */
430
431static struct net_device_stats *hdlcdrv_get_stats(struct net_device *dev)
432{
433 struct hdlcdrv_state *sm = netdev_priv(dev);
434
435 /*
436 * Get the current statistics. This may be called with the
437 * card open or closed.
438 */
439 return &sm->stats;
440}
441
442/* --------------------------------------------------------------------- */
443/* 430/*
444 * Open/initialize the board. This is called (in the current kernel) 431 * Open/initialize the board. This is called (in the current kernel)
445 * sometime after booting when the 'ifconfig' program is run. 432 * sometime after booting when the 'ifconfig' program is run.
@@ -568,10 +555,10 @@ static int hdlcdrv_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
568 bi.data.cs.ptt = hdlcdrv_ptt(s); 555 bi.data.cs.ptt = hdlcdrv_ptt(s);
569 bi.data.cs.dcd = s->hdlcrx.dcd; 556 bi.data.cs.dcd = s->hdlcrx.dcd;
570 bi.data.cs.ptt_keyed = s->ptt_keyed; 557 bi.data.cs.ptt_keyed = s->ptt_keyed;
571 bi.data.cs.tx_packets = s->stats.tx_packets; 558 bi.data.cs.tx_packets = dev->stats.tx_packets;
572 bi.data.cs.tx_errors = s->stats.tx_errors; 559 bi.data.cs.tx_errors = dev->stats.tx_errors;
573 bi.data.cs.rx_packets = s->stats.rx_packets; 560 bi.data.cs.rx_packets = dev->stats.rx_packets;
574 bi.data.cs.rx_errors = s->stats.rx_errors; 561 bi.data.cs.rx_errors = dev->stats.rx_errors;
575 break; 562 break;
576 563
577 case HDLCDRVCTL_OLDGETSTAT: 564 case HDLCDRVCTL_OLDGETSTAT:
@@ -630,6 +617,14 @@ static int hdlcdrv_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
630 617
631/* --------------------------------------------------------------------- */ 618/* --------------------------------------------------------------------- */
632 619
620static const struct net_device_ops hdlcdrv_netdev = {
621 .ndo_open = hdlcdrv_open,
622 .ndo_stop = hdlcdrv_close,
623 .ndo_start_xmit = hdlcdrv_send_packet,
624 .ndo_do_ioctl = hdlcdrv_ioctl,
625 .ndo_set_mac_address = hdlcdrv_set_mac_address,
626};
627
633/* 628/*
634 * Initialize fields in hdlcdrv 629 * Initialize fields in hdlcdrv
635 */ 630 */
@@ -669,21 +664,13 @@ static void hdlcdrv_setup(struct net_device *dev)
669 s->bitbuf_hdlc.shreg = 0x80; 664 s->bitbuf_hdlc.shreg = 0x80;
670#endif /* HDLCDRV_DEBUG */ 665#endif /* HDLCDRV_DEBUG */
671 666
672 /*
673 * initialize the device struct
674 */
675 dev->open = hdlcdrv_open;
676 dev->stop = hdlcdrv_close;
677 dev->do_ioctl = hdlcdrv_ioctl;
678 dev->hard_start_xmit = hdlcdrv_send_packet;
679 dev->get_stats = hdlcdrv_get_stats;
680 667
681 /* Fill in the fields of the device structure */ 668 /* Fill in the fields of the device structure */
682 669
683 s->skb = NULL; 670 s->skb = NULL;
684 671
672 dev->netdev_ops = &hdlcdrv_netdev;
685 dev->header_ops = &ax25_header_ops; 673 dev->header_ops = &ax25_header_ops;
686 dev->set_mac_address = hdlcdrv_set_mac_address;
687 674
688 dev->type = ARPHRD_AX25; /* AF_AX25 device */ 675 dev->type = ARPHRD_AX25; /* AF_AX25 device */
689 dev->hard_header_len = AX25_MAX_HEADER_LEN + AX25_BPQ_HEADER_LEN; 676 dev->hard_header_len = AX25_MAX_HEADER_LEN + AX25_BPQ_HEADER_LEN;
diff --git a/drivers/net/hamradio/mkiss.c b/drivers/net/hamradio/mkiss.c
index bbdb311b8420..ed5b37d43334 100644
--- a/drivers/net/hamradio/mkiss.c
+++ b/drivers/net/hamradio/mkiss.c
@@ -59,8 +59,6 @@ struct mkiss {
59 unsigned char *xhead; /* pointer to next byte to XMIT */ 59 unsigned char *xhead; /* pointer to next byte to XMIT */
60 int xleft; /* bytes left in XMIT queue */ 60 int xleft; /* bytes left in XMIT queue */
61 61
62 struct net_device_stats stats;
63
64 /* Detailed SLIP statistics. */ 62 /* Detailed SLIP statistics. */
65 int mtu; /* Our mtu (to spot changes!) */ 63 int mtu; /* Our mtu (to spot changes!) */
66 int buffsize; /* Max buffers sizes */ 64 int buffsize; /* Max buffers sizes */
@@ -253,7 +251,7 @@ static void ax_bump(struct mkiss *ax)
253 if (ax->rbuff[0] > 0x0f) { 251 if (ax->rbuff[0] > 0x0f) {
254 if (ax->rbuff[0] & 0x80) { 252 if (ax->rbuff[0] & 0x80) {
255 if (check_crc_16(ax->rbuff, ax->rcount) < 0) { 253 if (check_crc_16(ax->rbuff, ax->rcount) < 0) {
256 ax->stats.rx_errors++; 254 ax->dev->stats.rx_errors++;
257 spin_unlock_bh(&ax->buflock); 255 spin_unlock_bh(&ax->buflock);
258 256
259 return; 257 return;
@@ -268,7 +266,7 @@ static void ax_bump(struct mkiss *ax)
268 *ax->rbuff &= ~0x80; 266 *ax->rbuff &= ~0x80;
269 } else if (ax->rbuff[0] & 0x20) { 267 } else if (ax->rbuff[0] & 0x20) {
270 if (check_crc_flex(ax->rbuff, ax->rcount) < 0) { 268 if (check_crc_flex(ax->rbuff, ax->rcount) < 0) {
271 ax->stats.rx_errors++; 269 ax->dev->stats.rx_errors++;
272 spin_unlock_bh(&ax->buflock); 270 spin_unlock_bh(&ax->buflock);
273 return; 271 return;
274 } 272 }
@@ -295,7 +293,7 @@ static void ax_bump(struct mkiss *ax)
295 if ((skb = dev_alloc_skb(count)) == NULL) { 293 if ((skb = dev_alloc_skb(count)) == NULL) {
296 printk(KERN_ERR "mkiss: %s: memory squeeze, dropping packet.\n", 294 printk(KERN_ERR "mkiss: %s: memory squeeze, dropping packet.\n",
297 ax->dev->name); 295 ax->dev->name);
298 ax->stats.rx_dropped++; 296 ax->dev->stats.rx_dropped++;
299 spin_unlock_bh(&ax->buflock); 297 spin_unlock_bh(&ax->buflock);
300 return; 298 return;
301 } 299 }
@@ -303,8 +301,8 @@ static void ax_bump(struct mkiss *ax)
303 memcpy(skb_put(skb,count), ax->rbuff, count); 301 memcpy(skb_put(skb,count), ax->rbuff, count);
304 skb->protocol = ax25_type_trans(skb, ax->dev); 302 skb->protocol = ax25_type_trans(skb, ax->dev);
305 netif_rx(skb); 303 netif_rx(skb);
306 ax->stats.rx_packets++; 304 ax->dev->stats.rx_packets++;
307 ax->stats.rx_bytes += count; 305 ax->dev->stats.rx_bytes += count;
308 spin_unlock_bh(&ax->buflock); 306 spin_unlock_bh(&ax->buflock);
309} 307}
310 308
@@ -344,7 +342,7 @@ static void kiss_unesc(struct mkiss *ax, unsigned char s)
344 return; 342 return;
345 } 343 }
346 344
347 ax->stats.rx_over_errors++; 345 ax->dev->stats.rx_over_errors++;
348 set_bit(AXF_ERROR, &ax->flags); 346 set_bit(AXF_ERROR, &ax->flags);
349 } 347 }
350 spin_unlock_bh(&ax->buflock); 348 spin_unlock_bh(&ax->buflock);
@@ -406,7 +404,7 @@ static void ax_changedmtu(struct mkiss *ax)
406 memcpy(ax->xbuff, ax->xhead, ax->xleft); 404 memcpy(ax->xbuff, ax->xhead, ax->xleft);
407 } else { 405 } else {
408 ax->xleft = 0; 406 ax->xleft = 0;
409 ax->stats.tx_dropped++; 407 dev->stats.tx_dropped++;
410 } 408 }
411 } 409 }
412 410
@@ -417,7 +415,7 @@ static void ax_changedmtu(struct mkiss *ax)
417 memcpy(ax->rbuff, orbuff, ax->rcount); 415 memcpy(ax->rbuff, orbuff, ax->rcount);
418 } else { 416 } else {
419 ax->rcount = 0; 417 ax->rcount = 0;
420 ax->stats.rx_over_errors++; 418 dev->stats.rx_over_errors++;
421 set_bit(AXF_ERROR, &ax->flags); 419 set_bit(AXF_ERROR, &ax->flags);
422 } 420 }
423 } 421 }
@@ -444,7 +442,7 @@ static void ax_encaps(struct net_device *dev, unsigned char *icp, int len)
444 if (len > ax->mtu) { /* Sigh, shouldn't occur BUT ... */ 442 if (len > ax->mtu) { /* Sigh, shouldn't occur BUT ... */
445 len = ax->mtu; 443 len = ax->mtu;
446 printk(KERN_ERR "mkiss: %s: truncating oversized transmit packet!\n", ax->dev->name); 444 printk(KERN_ERR "mkiss: %s: truncating oversized transmit packet!\n", ax->dev->name);
447 ax->stats.tx_dropped++; 445 dev->stats.tx_dropped++;
448 netif_start_queue(dev); 446 netif_start_queue(dev);
449 return; 447 return;
450 } 448 }
@@ -518,8 +516,8 @@ static void ax_encaps(struct net_device *dev, unsigned char *icp, int len)
518 516
519 set_bit(TTY_DO_WRITE_WAKEUP, &ax->tty->flags); 517 set_bit(TTY_DO_WRITE_WAKEUP, &ax->tty->flags);
520 actual = ax->tty->ops->write(ax->tty, ax->xbuff, count); 518 actual = ax->tty->ops->write(ax->tty, ax->xbuff, count);
521 ax->stats.tx_packets++; 519 dev->stats.tx_packets++;
522 ax->stats.tx_bytes += actual; 520 dev->stats.tx_bytes += actual;
523 521
524 ax->dev->trans_start = jiffies; 522 ax->dev->trans_start = jiffies;
525 ax->xleft = count - actual; 523 ax->xleft = count - actual;
@@ -664,32 +662,28 @@ static int ax_close(struct net_device *dev)
664 return 0; 662 return 0;
665} 663}
666 664
667static struct net_device_stats *ax_get_stats(struct net_device *dev)
668{
669 struct mkiss *ax = netdev_priv(dev);
670
671 return &ax->stats;
672}
673
674static const struct header_ops ax_header_ops = { 665static const struct header_ops ax_header_ops = {
675 .create = ax_header, 666 .create = ax_header,
676 .rebuild = ax_rebuild_header, 667 .rebuild = ax_rebuild_header,
677}; 668};
678 669
670static const struct net_device_ops ax_netdev_ops = {
671 .ndo_open = ax_open_dev,
672 .ndo_stop = ax_close,
673 .ndo_start_xmit = ax_xmit,
674 .ndo_set_mac_address = ax_set_mac_address,
675};
676
679static void ax_setup(struct net_device *dev) 677static void ax_setup(struct net_device *dev)
680{ 678{
681 /* Finish setting up the DEVICE info. */ 679 /* Finish setting up the DEVICE info. */
682 dev->mtu = AX_MTU; 680 dev->mtu = AX_MTU;
683 dev->hard_start_xmit = ax_xmit;
684 dev->open = ax_open_dev;
685 dev->stop = ax_close;
686 dev->get_stats = ax_get_stats;
687 dev->set_mac_address = ax_set_mac_address;
688 dev->hard_header_len = 0; 681 dev->hard_header_len = 0;
689 dev->addr_len = 0; 682 dev->addr_len = 0;
690 dev->type = ARPHRD_AX25; 683 dev->type = ARPHRD_AX25;
691 dev->tx_queue_len = 10; 684 dev->tx_queue_len = 10;
692 dev->header_ops = &ax_header_ops; 685 dev->header_ops = &ax_header_ops;
686 dev->netdev_ops = &ax_netdev_ops;
693 687
694 688
695 memcpy(dev->broadcast, &ax25_bcast, AX25_ADDR_LEN); 689 memcpy(dev->broadcast, &ax25_bcast, AX25_ADDR_LEN);
@@ -929,7 +923,7 @@ static void mkiss_receive_buf(struct tty_struct *tty, const unsigned char *cp,
929 while (count--) { 923 while (count--) {
930 if (fp != NULL && *fp++) { 924 if (fp != NULL && *fp++) {
931 if (!test_and_set_bit(AXF_ERROR, &ax->flags)) 925 if (!test_and_set_bit(AXF_ERROR, &ax->flags))
932 ax->stats.rx_errors++; 926 ax->dev->stats.rx_errors++;
933 cp++; 927 cp++;
934 continue; 928 continue;
935 } 929 }
diff --git a/drivers/net/hamradio/scc.c b/drivers/net/hamradio/scc.c
index c011af7088ea..2acb18f06972 100644
--- a/drivers/net/hamradio/scc.c
+++ b/drivers/net/hamradio/scc.c
@@ -1542,23 +1542,24 @@ static int scc_net_alloc(const char *name, struct scc_channel *scc)
1542/* * Network driver methods * */ 1542/* * Network driver methods * */
1543/* ******************************************************************** */ 1543/* ******************************************************************** */
1544 1544
1545static const struct net_device_ops scc_netdev_ops = {
1546 .ndo_open = scc_net_open,
1547 .ndo_stop = scc_net_close,
1548 .ndo_start_xmit = scc_net_tx,
1549 .ndo_set_mac_address = scc_net_set_mac_address,
1550 .ndo_get_stats = scc_net_get_stats,
1551 .ndo_do_ioctl = scc_net_ioctl,
1552};
1553
1545/* ----> Initialize device <----- */ 1554/* ----> Initialize device <----- */
1546 1555
1547static void scc_net_setup(struct net_device *dev) 1556static void scc_net_setup(struct net_device *dev)
1548{ 1557{
1549 dev->tx_queue_len = 16; /* should be enough... */ 1558 dev->tx_queue_len = 16; /* should be enough... */
1550 1559
1551 dev->open = scc_net_open; 1560 dev->netdev_ops = &scc_netdev_ops;
1552 dev->stop = scc_net_close;
1553
1554 dev->hard_start_xmit = scc_net_tx;
1555 dev->header_ops = &ax25_header_ops; 1561 dev->header_ops = &ax25_header_ops;
1556 1562
1557 dev->set_mac_address = scc_net_set_mac_address;
1558 dev->get_stats = scc_net_get_stats;
1559 dev->do_ioctl = scc_net_ioctl;
1560 dev->tx_timeout = NULL;
1561
1562 memcpy(dev->broadcast, &ax25_bcast, AX25_ADDR_LEN); 1563 memcpy(dev->broadcast, &ax25_bcast, AX25_ADDR_LEN);
1563 memcpy(dev->dev_addr, &ax25_defaddr, AX25_ADDR_LEN); 1564 memcpy(dev->dev_addr, &ax25_defaddr, AX25_ADDR_LEN);
1564 1565
@@ -2073,7 +2074,7 @@ static int scc_net_seq_show(struct seq_file *seq, void *v)
2073 return 0; 2074 return 0;
2074} 2075}
2075 2076
2076static struct seq_operations scc_net_seq_ops = { 2077static const struct seq_operations scc_net_seq_ops = {
2077 .start = scc_net_seq_start, 2078 .start = scc_net_seq_start,
2078 .next = scc_net_seq_next, 2079 .next = scc_net_seq_next,
2079 .stop = scc_net_seq_stop, 2080 .stop = scc_net_seq_stop,
diff --git a/drivers/net/hamradio/yam.c b/drivers/net/hamradio/yam.c
index 5407f7486c9c..82a8be7613d6 100644
--- a/drivers/net/hamradio/yam.c
+++ b/drivers/net/hamradio/yam.c
@@ -115,10 +115,6 @@ struct yam_port {
115 115
116 struct net_device *dev; 116 struct net_device *dev;
117 117
118 /* Stats section */
119
120 struct net_device_stats stats;
121
122 int nb_rxint; 118 int nb_rxint;
123 int nb_mdint; 119 int nb_mdint;
124 120
@@ -507,7 +503,7 @@ static inline void yam_rx_flag(struct net_device *dev, struct yam_port *yp)
507 } else { 503 } else {
508 if (!(skb = dev_alloc_skb(pkt_len))) { 504 if (!(skb = dev_alloc_skb(pkt_len))) {
509 printk(KERN_WARNING "%s: memory squeeze, dropping packet\n", dev->name); 505 printk(KERN_WARNING "%s: memory squeeze, dropping packet\n", dev->name);
510 ++yp->stats.rx_dropped; 506 ++dev->stats.rx_dropped;
511 } else { 507 } else {
512 unsigned char *cp; 508 unsigned char *cp;
513 cp = skb_put(skb, pkt_len); 509 cp = skb_put(skb, pkt_len);
@@ -515,7 +511,7 @@ static inline void yam_rx_flag(struct net_device *dev, struct yam_port *yp)
515 memcpy(cp, yp->rx_buf, pkt_len - 1); 511 memcpy(cp, yp->rx_buf, pkt_len - 1);
516 skb->protocol = ax25_type_trans(skb, dev); 512 skb->protocol = ax25_type_trans(skb, dev);
517 netif_rx(skb); 513 netif_rx(skb);
518 ++yp->stats.rx_packets; 514 ++dev->stats.rx_packets;
519 } 515 }
520 } 516 }
521 } 517 }
@@ -677,7 +673,7 @@ static void yam_tx_byte(struct net_device *dev, struct yam_port *yp)
677 yp->tx_count = 1; 673 yp->tx_count = 1;
678 yp->tx_state = TX_HEAD; 674 yp->tx_state = TX_HEAD;
679 } 675 }
680 ++yp->stats.tx_packets; 676 ++dev->stats.tx_packets;
681 break; 677 break;
682 case TX_TAIL: 678 case TX_TAIL:
683 if (--yp->tx_count <= 0) { 679 if (--yp->tx_count <= 0) {
@@ -716,7 +712,7 @@ static irqreturn_t yam_interrupt(int irq, void *dev_id)
716 handled = 1; 712 handled = 1;
717 713
718 if (lsr & LSR_OE) 714 if (lsr & LSR_OE)
719 ++yp->stats.rx_fifo_errors; 715 ++dev->stats.rx_fifo_errors;
720 716
721 yp->dcd = (msr & RX_DCD) ? 1 : 0; 717 yp->dcd = (msr & RX_DCD) ? 1 : 0;
722 718
@@ -778,16 +774,16 @@ static int yam_seq_show(struct seq_file *seq, void *v)
778 seq_printf(seq, " TxTail %u\n", yp->txtail); 774 seq_printf(seq, " TxTail %u\n", yp->txtail);
779 seq_printf(seq, " SlotTime %u\n", yp->slot); 775 seq_printf(seq, " SlotTime %u\n", yp->slot);
780 seq_printf(seq, " Persist %u\n", yp->pers); 776 seq_printf(seq, " Persist %u\n", yp->pers);
781 seq_printf(seq, " TxFrames %lu\n", yp->stats.tx_packets); 777 seq_printf(seq, " TxFrames %lu\n", dev->stats.tx_packets);
782 seq_printf(seq, " RxFrames %lu\n", yp->stats.rx_packets); 778 seq_printf(seq, " RxFrames %lu\n", dev->stats.rx_packets);
783 seq_printf(seq, " TxInt %u\n", yp->nb_mdint); 779 seq_printf(seq, " TxInt %u\n", yp->nb_mdint);
784 seq_printf(seq, " RxInt %u\n", yp->nb_rxint); 780 seq_printf(seq, " RxInt %u\n", yp->nb_rxint);
785 seq_printf(seq, " RxOver %lu\n", yp->stats.rx_fifo_errors); 781 seq_printf(seq, " RxOver %lu\n", dev->stats.rx_fifo_errors);
786 seq_printf(seq, "\n"); 782 seq_printf(seq, "\n");
787 return 0; 783 return 0;
788} 784}
789 785
790static struct seq_operations yam_seqops = { 786static const struct seq_operations yam_seqops = {
791 .start = yam_seq_start, 787 .start = yam_seq_start,
792 .next = yam_seq_next, 788 .next = yam_seq_next,
793 .stop = yam_seq_stop, 789 .stop = yam_seq_stop,
@@ -812,26 +808,6 @@ static const struct file_operations yam_info_fops = {
812 808
813/* --------------------------------------------------------------------- */ 809/* --------------------------------------------------------------------- */
814 810
815static struct net_device_stats *yam_get_stats(struct net_device *dev)
816{
817 struct yam_port *yp;
818
819 if (!dev)
820 return NULL;
821
822 yp = netdev_priv(dev);
823 if (yp->magic != YAM_MAGIC)
824 return NULL;
825
826 /*
827 * Get the current statistics. This may be called with the
828 * card open or closed.
829 */
830 return &yp->stats;
831}
832
833/* --------------------------------------------------------------------- */
834
835static int yam_open(struct net_device *dev) 811static int yam_open(struct net_device *dev)
836{ 812{
837 struct yam_port *yp = netdev_priv(dev); 813 struct yam_port *yp = netdev_priv(dev);
@@ -878,9 +854,9 @@ static int yam_open(struct net_device *dev)
878 /* Reset overruns for all ports - FPGA programming makes overruns */ 854 /* Reset overruns for all ports - FPGA programming makes overruns */
879 for (i = 0; i < NR_PORTS; i++) { 855 for (i = 0; i < NR_PORTS; i++) {
880 struct net_device *dev = yam_devs[i]; 856 struct net_device *dev = yam_devs[i];
881 struct yam_port *yp = netdev_priv(dev); 857
882 inb(LSR(dev->base_addr)); 858 inb(LSR(dev->base_addr));
883 yp->stats.rx_fifo_errors = 0; 859 dev->stats.rx_fifo_errors = 0;
884 } 860 }
885 861
886 printk(KERN_INFO "%s at iobase 0x%lx irq %u uart %s\n", dev->name, dev->base_addr, dev->irq, 862 printk(KERN_INFO "%s at iobase 0x%lx irq %u uart %s\n", dev->name, dev->base_addr, dev->irq,
@@ -1068,6 +1044,14 @@ static int yam_set_mac_address(struct net_device *dev, void *addr)
1068 1044
1069/* --------------------------------------------------------------------- */ 1045/* --------------------------------------------------------------------- */
1070 1046
1047static const struct net_device_ops yam_netdev_ops = {
1048 .ndo_open = yam_open,
1049 .ndo_stop = yam_close,
1050 .ndo_start_xmit = yam_send_packet,
1051 .ndo_do_ioctl = yam_ioctl,
1052 .ndo_set_mac_address = yam_set_mac_address,
1053};
1054
1071static void yam_setup(struct net_device *dev) 1055static void yam_setup(struct net_device *dev)
1072{ 1056{
1073 struct yam_port *yp = netdev_priv(dev); 1057 struct yam_port *yp = netdev_priv(dev);
@@ -1088,18 +1072,11 @@ static void yam_setup(struct net_device *dev)
1088 dev->base_addr = yp->iobase; 1072 dev->base_addr = yp->iobase;
1089 dev->irq = yp->irq; 1073 dev->irq = yp->irq;
1090 1074
1091 dev->open = yam_open;
1092 dev->stop = yam_close;
1093 dev->do_ioctl = yam_ioctl;
1094 dev->hard_start_xmit = yam_send_packet;
1095 dev->get_stats = yam_get_stats;
1096
1097 skb_queue_head_init(&yp->send_queue); 1075 skb_queue_head_init(&yp->send_queue);
1098 1076
1077 dev->netdev_ops = &yam_netdev_ops;
1099 dev->header_ops = &ax25_header_ops; 1078 dev->header_ops = &ax25_header_ops;
1100 1079
1101 dev->set_mac_address = yam_set_mac_address;
1102
1103 dev->type = ARPHRD_AX25; 1080 dev->type = ARPHRD_AX25;
1104 dev->hard_header_len = AX25_MAX_HEADER_LEN; 1081 dev->hard_header_len = AX25_MAX_HEADER_LEN;
1105 dev->mtu = AX25_MTU; 1082 dev->mtu = AX25_MTU;
diff --git a/drivers/net/ibmveth.c b/drivers/net/ibmveth.c
index dfa6348ac1dc..5c6315df86b9 100644
--- a/drivers/net/ibmveth.c
+++ b/drivers/net/ibmveth.c
@@ -1028,10 +1028,10 @@ static int ibmveth_poll(struct napi_struct *napi, int budget)
1028 1028
1029 ibmveth_assert(lpar_rc == H_SUCCESS); 1029 ibmveth_assert(lpar_rc == H_SUCCESS);
1030 1030
1031 netif_rx_complete(napi); 1031 napi_complete(napi);
1032 1032
1033 if (ibmveth_rxq_pending_buffer(adapter) && 1033 if (ibmveth_rxq_pending_buffer(adapter) &&
1034 netif_rx_reschedule(napi)) { 1034 napi_reschedule(napi)) {
1035 lpar_rc = h_vio_signal(adapter->vdev->unit_address, 1035 lpar_rc = h_vio_signal(adapter->vdev->unit_address,
1036 VIO_IRQ_DISABLE); 1036 VIO_IRQ_DISABLE);
1037 goto restart_poll; 1037 goto restart_poll;
@@ -1047,11 +1047,11 @@ static irqreturn_t ibmveth_interrupt(int irq, void *dev_instance)
1047 struct ibmveth_adapter *adapter = netdev_priv(netdev); 1047 struct ibmveth_adapter *adapter = netdev_priv(netdev);
1048 unsigned long lpar_rc; 1048 unsigned long lpar_rc;
1049 1049
1050 if (netif_rx_schedule_prep(&adapter->napi)) { 1050 if (napi_schedule_prep(&adapter->napi)) {
1051 lpar_rc = h_vio_signal(adapter->vdev->unit_address, 1051 lpar_rc = h_vio_signal(adapter->vdev->unit_address,
1052 VIO_IRQ_DISABLE); 1052 VIO_IRQ_DISABLE);
1053 ibmveth_assert(lpar_rc == H_SUCCESS); 1053 ibmveth_assert(lpar_rc == H_SUCCESS);
1054 __netif_rx_schedule(&adapter->napi); 1054 __napi_schedule(&adapter->napi);
1055 } 1055 }
1056 return IRQ_HANDLED; 1056 return IRQ_HANDLED;
1057} 1057}
diff --git a/drivers/net/igb/e1000_82575.c b/drivers/net/igb/e1000_82575.c
index f5e2e7235fcb..9b367ba8e26f 100644
--- a/drivers/net/igb/e1000_82575.c
+++ b/drivers/net/igb/e1000_82575.c
@@ -1103,6 +1103,13 @@ static s32 igb_setup_fiber_serdes_link_82575(struct e1000_hw *hw)
1103 E1000_CTRL_SWDPIN1; 1103 E1000_CTRL_SWDPIN1;
1104 wr32(E1000_CTRL, reg); 1104 wr32(E1000_CTRL, reg);
1105 1105
1106 /* Power on phy for 82576 fiber adapters */
1107 if (hw->mac.type == e1000_82576) {
1108 reg = rd32(E1000_CTRL_EXT);
1109 reg &= ~E1000_CTRL_EXT_SDP7_DATA;
1110 wr32(E1000_CTRL_EXT, reg);
1111 }
1112
1106 /* Set switch control to serdes energy detect */ 1113 /* Set switch control to serdes energy detect */
1107 reg = rd32(E1000_CONNSW); 1114 reg = rd32(E1000_CONNSW);
1108 reg |= E1000_CONNSW_ENRGSRC; 1115 reg |= E1000_CONNSW_ENRGSRC;
diff --git a/drivers/net/igb/igb.h b/drivers/net/igb/igb.h
index 5a27825cc48a..7d8c88739154 100644
--- a/drivers/net/igb/igb.h
+++ b/drivers/net/igb/igb.h
@@ -36,12 +36,6 @@
36 36
37struct igb_adapter; 37struct igb_adapter;
38 38
39#ifdef CONFIG_IGB_LRO
40#include <linux/inet_lro.h>
41#define MAX_LRO_AGGR 32
42#define MAX_LRO_DESCRIPTORS 8
43#endif
44
45/* Interrupt defines */ 39/* Interrupt defines */
46#define IGB_MIN_DYN_ITR 3000 40#define IGB_MIN_DYN_ITR 3000
47#define IGB_MAX_DYN_ITR 96000 41#define IGB_MAX_DYN_ITR 96000
@@ -176,10 +170,6 @@ struct igb_ring {
176 struct napi_struct napi; 170 struct napi_struct napi;
177 int set_itr; 171 int set_itr;
178 struct igb_ring *buddy; 172 struct igb_ring *buddy;
179#ifdef CONFIG_IGB_LRO
180 struct net_lro_mgr lro_mgr;
181 bool lro_used;
182#endif
183 }; 173 };
184 }; 174 };
185 175
@@ -288,12 +278,6 @@ struct igb_adapter {
288 int need_ioport; 278 int need_ioport;
289 279
290 struct igb_ring *multi_tx_table[IGB_MAX_TX_QUEUES]; 280 struct igb_ring *multi_tx_table[IGB_MAX_TX_QUEUES];
291#ifdef CONFIG_IGB_LRO
292 unsigned int lro_max_aggr;
293 unsigned int lro_aggregated;
294 unsigned int lro_flushed;
295 unsigned int lro_no_desc;
296#endif
297 unsigned int tx_ring_count; 281 unsigned int tx_ring_count;
298 unsigned int rx_ring_count; 282 unsigned int rx_ring_count;
299}; 283};
diff --git a/drivers/net/igb/igb_ethtool.c b/drivers/net/igb/igb_ethtool.c
index 3c831f1472ad..4606e63fc6f5 100644
--- a/drivers/net/igb/igb_ethtool.c
+++ b/drivers/net/igb/igb_ethtool.c
@@ -93,11 +93,6 @@ static const struct igb_stats igb_gstrings_stats[] = {
93 { "tx_smbus", IGB_STAT(stats.mgptc) }, 93 { "tx_smbus", IGB_STAT(stats.mgptc) },
94 { "rx_smbus", IGB_STAT(stats.mgprc) }, 94 { "rx_smbus", IGB_STAT(stats.mgprc) },
95 { "dropped_smbus", IGB_STAT(stats.mgpdc) }, 95 { "dropped_smbus", IGB_STAT(stats.mgpdc) },
96#ifdef CONFIG_IGB_LRO
97 { "lro_aggregated", IGB_STAT(lro_aggregated) },
98 { "lro_flushed", IGB_STAT(lro_flushed) },
99 { "lro_no_desc", IGB_STAT(lro_no_desc) },
100#endif
101}; 96};
102 97
103#define IGB_QUEUE_STATS_LEN \ 98#define IGB_QUEUE_STATS_LEN \
@@ -1921,18 +1916,6 @@ static void igb_get_ethtool_stats(struct net_device *netdev,
1921 int stat_count = sizeof(struct igb_queue_stats) / sizeof(u64); 1916 int stat_count = sizeof(struct igb_queue_stats) / sizeof(u64);
1922 int j; 1917 int j;
1923 int i; 1918 int i;
1924#ifdef CONFIG_IGB_LRO
1925 int aggregated = 0, flushed = 0, no_desc = 0;
1926
1927 for (i = 0; i < adapter->num_rx_queues; i++) {
1928 aggregated += adapter->rx_ring[i].lro_mgr.stats.aggregated;
1929 flushed += adapter->rx_ring[i].lro_mgr.stats.flushed;
1930 no_desc += adapter->rx_ring[i].lro_mgr.stats.no_desc;
1931 }
1932 adapter->lro_aggregated = aggregated;
1933 adapter->lro_flushed = flushed;
1934 adapter->lro_no_desc = no_desc;
1935#endif
1936 1919
1937 igb_update_stats(adapter); 1920 igb_update_stats(adapter);
1938 for (i = 0; i < IGB_GLOBAL_STATS_LEN; i++) { 1921 for (i = 0; i < IGB_GLOBAL_STATS_LEN; i++) {
diff --git a/drivers/net/igb/igb_main.c b/drivers/net/igb/igb_main.c
index b82b0fb2056c..e11043d90dbd 100644
--- a/drivers/net/igb/igb_main.c
+++ b/drivers/net/igb/igb_main.c
@@ -115,9 +115,6 @@ static bool igb_clean_tx_irq(struct igb_ring *);
115static int igb_poll(struct napi_struct *, int); 115static int igb_poll(struct napi_struct *, int);
116static bool igb_clean_rx_irq_adv(struct igb_ring *, int *, int); 116static bool igb_clean_rx_irq_adv(struct igb_ring *, int *, int);
117static void igb_alloc_rx_buffers_adv(struct igb_ring *, int); 117static void igb_alloc_rx_buffers_adv(struct igb_ring *, int);
118#ifdef CONFIG_IGB_LRO
119static int igb_get_skb_hdr(struct sk_buff *skb, void **, void **, u64 *, void *);
120#endif
121static int igb_ioctl(struct net_device *, struct ifreq *, int cmd); 118static int igb_ioctl(struct net_device *, struct ifreq *, int cmd);
122static void igb_tx_timeout(struct net_device *); 119static void igb_tx_timeout(struct net_device *);
123static void igb_reset_task(struct work_struct *); 120static void igb_reset_task(struct work_struct *);
@@ -1189,7 +1186,7 @@ static int __devinit igb_probe(struct pci_dev *pdev,
1189 netdev->features |= NETIF_F_TSO6; 1186 netdev->features |= NETIF_F_TSO6;
1190 1187
1191#ifdef CONFIG_IGB_LRO 1188#ifdef CONFIG_IGB_LRO
1192 netdev->features |= NETIF_F_LRO; 1189 netdev->features |= NETIF_F_GRO;
1193#endif 1190#endif
1194 1191
1195 netdev->vlan_features |= NETIF_F_TSO; 1192 netdev->vlan_features |= NETIF_F_TSO;
@@ -1200,7 +1197,6 @@ static int __devinit igb_probe(struct pci_dev *pdev,
1200 if (pci_using_dac) 1197 if (pci_using_dac)
1201 netdev->features |= NETIF_F_HIGHDMA; 1198 netdev->features |= NETIF_F_HIGHDMA;
1202 1199
1203 netdev->features |= NETIF_F_LLTX;
1204 adapter->en_mng_pt = igb_enable_mng_pass_thru(&adapter->hw); 1200 adapter->en_mng_pt = igb_enable_mng_pass_thru(&adapter->hw);
1205 1201
1206 /* before reading the NVM, reset the controller to put the device in a 1202 /* before reading the NVM, reset the controller to put the device in a
@@ -1739,14 +1735,6 @@ int igb_setup_rx_resources(struct igb_adapter *adapter,
1739 struct pci_dev *pdev = adapter->pdev; 1735 struct pci_dev *pdev = adapter->pdev;
1740 int size, desc_len; 1736 int size, desc_len;
1741 1737
1742#ifdef CONFIG_IGB_LRO
1743 size = sizeof(struct net_lro_desc) * MAX_LRO_DESCRIPTORS;
1744 rx_ring->lro_mgr.lro_arr = vmalloc(size);
1745 if (!rx_ring->lro_mgr.lro_arr)
1746 goto err;
1747 memset(rx_ring->lro_mgr.lro_arr, 0, size);
1748#endif
1749
1750 size = sizeof(struct igb_buffer) * rx_ring->count; 1738 size = sizeof(struct igb_buffer) * rx_ring->count;
1751 rx_ring->buffer_info = vmalloc(size); 1739 rx_ring->buffer_info = vmalloc(size);
1752 if (!rx_ring->buffer_info) 1740 if (!rx_ring->buffer_info)
@@ -1773,10 +1761,6 @@ int igb_setup_rx_resources(struct igb_adapter *adapter,
1773 return 0; 1761 return 0;
1774 1762
1775err: 1763err:
1776#ifdef CONFIG_IGB_LRO
1777 vfree(rx_ring->lro_mgr.lro_arr);
1778 rx_ring->lro_mgr.lro_arr = NULL;
1779#endif
1780 vfree(rx_ring->buffer_info); 1764 vfree(rx_ring->buffer_info);
1781 dev_err(&adapter->pdev->dev, "Unable to allocate memory for " 1765 dev_err(&adapter->pdev->dev, "Unable to allocate memory for "
1782 "the receive descriptor ring\n"); 1766 "the receive descriptor ring\n");
@@ -1930,16 +1914,6 @@ static void igb_configure_rx(struct igb_adapter *adapter)
1930 rxdctl |= IGB_RX_HTHRESH << 8; 1914 rxdctl |= IGB_RX_HTHRESH << 8;
1931 rxdctl |= IGB_RX_WTHRESH << 16; 1915 rxdctl |= IGB_RX_WTHRESH << 16;
1932 wr32(E1000_RXDCTL(j), rxdctl); 1916 wr32(E1000_RXDCTL(j), rxdctl);
1933#ifdef CONFIG_IGB_LRO
1934 /* Intitial LRO Settings */
1935 ring->lro_mgr.max_aggr = MAX_LRO_AGGR;
1936 ring->lro_mgr.max_desc = MAX_LRO_DESCRIPTORS;
1937 ring->lro_mgr.get_skb_header = igb_get_skb_hdr;
1938 ring->lro_mgr.features = LRO_F_NAPI | LRO_F_EXTRACT_VLAN_ID;
1939 ring->lro_mgr.dev = adapter->netdev;
1940 ring->lro_mgr.ip_summed = CHECKSUM_UNNECESSARY;
1941 ring->lro_mgr.ip_summed_aggr = CHECKSUM_UNNECESSARY;
1942#endif
1943 } 1917 }
1944 1918
1945 if (adapter->num_rx_queues > 1) { 1919 if (adapter->num_rx_queues > 1) {
@@ -2128,11 +2102,6 @@ void igb_free_rx_resources(struct igb_ring *rx_ring)
2128 vfree(rx_ring->buffer_info); 2102 vfree(rx_ring->buffer_info);
2129 rx_ring->buffer_info = NULL; 2103 rx_ring->buffer_info = NULL;
2130 2104
2131#ifdef CONFIG_IGB_LRO
2132 vfree(rx_ring->lro_mgr.lro_arr);
2133 rx_ring->lro_mgr.lro_arr = NULL;
2134#endif
2135
2136 pci_free_consistent(pdev, rx_ring->size, rx_ring->desc, rx_ring->dma); 2105 pci_free_consistent(pdev, rx_ring->size, rx_ring->desc, rx_ring->dma);
2137 2106
2138 rx_ring->desc = NULL; 2107 rx_ring->desc = NULL;
@@ -3386,8 +3355,8 @@ static irqreturn_t igb_msix_rx(int irq, void *data)
3386 3355
3387 igb_write_itr(rx_ring); 3356 igb_write_itr(rx_ring);
3388 3357
3389 if (netif_rx_schedule_prep(&rx_ring->napi)) 3358 if (napi_schedule_prep(&rx_ring->napi))
3390 __netif_rx_schedule(&rx_ring->napi); 3359 __napi_schedule(&rx_ring->napi);
3391 3360
3392#ifdef CONFIG_IGB_DCA 3361#ifdef CONFIG_IGB_DCA
3393 if (rx_ring->adapter->flags & IGB_FLAG_DCA_ENABLED) 3362 if (rx_ring->adapter->flags & IGB_FLAG_DCA_ENABLED)
@@ -3539,7 +3508,7 @@ static irqreturn_t igb_intr_msi(int irq, void *data)
3539 mod_timer(&adapter->watchdog_timer, jiffies + 1); 3508 mod_timer(&adapter->watchdog_timer, jiffies + 1);
3540 } 3509 }
3541 3510
3542 netif_rx_schedule(&adapter->rx_ring[0].napi); 3511 napi_schedule(&adapter->rx_ring[0].napi);
3543 3512
3544 return IRQ_HANDLED; 3513 return IRQ_HANDLED;
3545} 3514}
@@ -3577,7 +3546,7 @@ static irqreturn_t igb_intr(int irq, void *data)
3577 mod_timer(&adapter->watchdog_timer, jiffies + 1); 3546 mod_timer(&adapter->watchdog_timer, jiffies + 1);
3578 } 3547 }
3579 3548
3580 netif_rx_schedule(&adapter->rx_ring[0].napi); 3549 napi_schedule(&adapter->rx_ring[0].napi);
3581 3550
3582 return IRQ_HANDLED; 3551 return IRQ_HANDLED;
3583} 3552}
@@ -3612,7 +3581,7 @@ static int igb_poll(struct napi_struct *napi, int budget)
3612 !netif_running(netdev)) { 3581 !netif_running(netdev)) {
3613 if (adapter->itr_setting & 3) 3582 if (adapter->itr_setting & 3)
3614 igb_set_itr(adapter); 3583 igb_set_itr(adapter);
3615 netif_rx_complete(napi); 3584 napi_complete(napi);
3616 if (!test_bit(__IGB_DOWN, &adapter->state)) 3585 if (!test_bit(__IGB_DOWN, &adapter->state))
3617 igb_irq_enable(adapter); 3586 igb_irq_enable(adapter);
3618 return 0; 3587 return 0;
@@ -3638,7 +3607,7 @@ static int igb_clean_rx_ring_msix(struct napi_struct *napi, int budget)
3638 3607
3639 /* If not enough Rx work done, exit the polling mode */ 3608 /* If not enough Rx work done, exit the polling mode */
3640 if ((work_done == 0) || !netif_running(netdev)) { 3609 if ((work_done == 0) || !netif_running(netdev)) {
3641 netif_rx_complete(napi); 3610 napi_complete(napi);
3642 3611
3643 if (adapter->itr_setting & 3) { 3612 if (adapter->itr_setting & 3) {
3644 if (adapter->num_rx_queues == 1) 3613 if (adapter->num_rx_queues == 1)
@@ -3768,39 +3737,6 @@ static bool igb_clean_tx_irq(struct igb_ring *tx_ring)
3768 return (count < tx_ring->count); 3737 return (count < tx_ring->count);
3769} 3738}
3770 3739
3771#ifdef CONFIG_IGB_LRO
3772 /**
3773 * igb_get_skb_hdr - helper function for LRO header processing
3774 * @skb: pointer to sk_buff to be added to LRO packet
3775 * @iphdr: pointer to ip header structure
3776 * @tcph: pointer to tcp header structure
3777 * @hdr_flags: pointer to header flags
3778 * @priv: pointer to the receive descriptor for the current sk_buff
3779 **/
3780static int igb_get_skb_hdr(struct sk_buff *skb, void **iphdr, void **tcph,
3781 u64 *hdr_flags, void *priv)
3782{
3783 union e1000_adv_rx_desc *rx_desc = priv;
3784 u16 pkt_type = rx_desc->wb.lower.lo_dword.pkt_info &
3785 (E1000_RXDADV_PKTTYPE_IPV4 | E1000_RXDADV_PKTTYPE_TCP);
3786
3787 /* Verify that this is a valid IPv4 TCP packet */
3788 if (pkt_type != (E1000_RXDADV_PKTTYPE_IPV4 |
3789 E1000_RXDADV_PKTTYPE_TCP))
3790 return -1;
3791
3792 /* Set network headers */
3793 skb_reset_network_header(skb);
3794 skb_set_transport_header(skb, ip_hdrlen(skb));
3795 *iphdr = ip_hdr(skb);
3796 *tcph = tcp_hdr(skb);
3797 *hdr_flags = LRO_IPV4 | LRO_TCP;
3798
3799 return 0;
3800
3801}
3802#endif /* CONFIG_IGB_LRO */
3803
3804/** 3740/**
3805 * igb_receive_skb - helper function to handle rx indications 3741 * igb_receive_skb - helper function to handle rx indications
3806 * @ring: pointer to receive ring receving this packet 3742 * @ring: pointer to receive ring receving this packet
@@ -3815,28 +3751,20 @@ static void igb_receive_skb(struct igb_ring *ring, u8 status,
3815 struct igb_adapter * adapter = ring->adapter; 3751 struct igb_adapter * adapter = ring->adapter;
3816 bool vlan_extracted = (adapter->vlgrp && (status & E1000_RXD_STAT_VP)); 3752 bool vlan_extracted = (adapter->vlgrp && (status & E1000_RXD_STAT_VP));
3817 3753
3818#ifdef CONFIG_IGB_LRO 3754 if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
3819 if (adapter->netdev->features & NETIF_F_LRO &&
3820 skb->ip_summed == CHECKSUM_UNNECESSARY) {
3821 if (vlan_extracted) 3755 if (vlan_extracted)
3822 lro_vlan_hwaccel_receive_skb(&ring->lro_mgr, skb, 3756 vlan_gro_receive(&ring->napi, adapter->vlgrp,
3823 adapter->vlgrp, 3757 le16_to_cpu(rx_desc->wb.upper.vlan),
3824 le16_to_cpu(rx_desc->wb.upper.vlan), 3758 skb);
3825 rx_desc);
3826 else 3759 else
3827 lro_receive_skb(&ring->lro_mgr,skb, rx_desc); 3760 napi_gro_receive(&ring->napi, skb);
3828 ring->lro_used = 1;
3829 } else { 3761 } else {
3830#endif
3831 if (vlan_extracted) 3762 if (vlan_extracted)
3832 vlan_hwaccel_receive_skb(skb, adapter->vlgrp, 3763 vlan_hwaccel_receive_skb(skb, adapter->vlgrp,
3833 le16_to_cpu(rx_desc->wb.upper.vlan)); 3764 le16_to_cpu(rx_desc->wb.upper.vlan));
3834 else 3765 else
3835
3836 netif_receive_skb(skb); 3766 netif_receive_skb(skb);
3837#ifdef CONFIG_IGB_LRO
3838 } 3767 }
3839#endif
3840} 3768}
3841 3769
3842 3770
@@ -3991,13 +3919,6 @@ next_desc:
3991 rx_ring->next_to_clean = i; 3919 rx_ring->next_to_clean = i;
3992 cleaned_count = IGB_DESC_UNUSED(rx_ring); 3920 cleaned_count = IGB_DESC_UNUSED(rx_ring);
3993 3921
3994#ifdef CONFIG_IGB_LRO
3995 if (rx_ring->lro_used) {
3996 lro_flush_all(&rx_ring->lro_mgr);
3997 rx_ring->lro_used = 0;
3998 }
3999#endif
4000
4001 if (cleaned_count) 3922 if (cleaned_count)
4002 igb_alloc_rx_buffers_adv(rx_ring, cleaned_count); 3923 igb_alloc_rx_buffers_adv(rx_ring, cleaned_count);
4003 3924
diff --git a/drivers/net/ixgb/ixgb_main.c b/drivers/net/ixgb/ixgb_main.c
index eee28d395682..e2ef16b29700 100644
--- a/drivers/net/ixgb/ixgb_main.c
+++ b/drivers/net/ixgb/ixgb_main.c
@@ -1721,14 +1721,14 @@ ixgb_intr(int irq, void *data)
1721 if (!test_bit(__IXGB_DOWN, &adapter->flags)) 1721 if (!test_bit(__IXGB_DOWN, &adapter->flags))
1722 mod_timer(&adapter->watchdog_timer, jiffies); 1722 mod_timer(&adapter->watchdog_timer, jiffies);
1723 1723
1724 if (netif_rx_schedule_prep(&adapter->napi)) { 1724 if (napi_schedule_prep(&adapter->napi)) {
1725 1725
1726 /* Disable interrupts and register for poll. The flush 1726 /* Disable interrupts and register for poll. The flush
1727 of the posted write is intentionally left out. 1727 of the posted write is intentionally left out.
1728 */ 1728 */
1729 1729
1730 IXGB_WRITE_REG(&adapter->hw, IMC, ~0); 1730 IXGB_WRITE_REG(&adapter->hw, IMC, ~0);
1731 __netif_rx_schedule(&adapter->napi); 1731 __napi_schedule(&adapter->napi);
1732 } 1732 }
1733 return IRQ_HANDLED; 1733 return IRQ_HANDLED;
1734} 1734}
@@ -1749,7 +1749,7 @@ ixgb_clean(struct napi_struct *napi, int budget)
1749 1749
1750 /* If budget not fully consumed, exit the polling mode */ 1750 /* If budget not fully consumed, exit the polling mode */
1751 if (work_done < budget) { 1751 if (work_done < budget) {
1752 netif_rx_complete(napi); 1752 napi_complete(napi);
1753 if (!test_bit(__IXGB_DOWN, &adapter->flags)) 1753 if (!test_bit(__IXGB_DOWN, &adapter->flags))
1754 ixgb_irq_enable(adapter); 1754 ixgb_irq_enable(adapter);
1755 } 1755 }
diff --git a/drivers/net/ixgbe/ixgbe.h b/drivers/net/ixgbe/ixgbe.h
index e112008f39c1..6ac361a4b8ad 100644
--- a/drivers/net/ixgbe/ixgbe.h
+++ b/drivers/net/ixgbe/ixgbe.h
@@ -31,7 +31,6 @@
31#include <linux/types.h> 31#include <linux/types.h>
32#include <linux/pci.h> 32#include <linux/pci.h>
33#include <linux/netdevice.h> 33#include <linux/netdevice.h>
34#include <linux/inet_lro.h>
35#include <linux/aer.h> 34#include <linux/aer.h>
36 35
37#include "ixgbe_type.h" 36#include "ixgbe_type.h"
@@ -88,9 +87,6 @@
88#define IXGBE_TX_FLAGS_VLAN_PRIO_MASK 0x0000e000 87#define IXGBE_TX_FLAGS_VLAN_PRIO_MASK 0x0000e000
89#define IXGBE_TX_FLAGS_VLAN_SHIFT 16 88#define IXGBE_TX_FLAGS_VLAN_SHIFT 16
90 89
91#define IXGBE_MAX_LRO_DESCRIPTORS 8
92#define IXGBE_MAX_LRO_AGGREGATE 32
93
94/* wrapper around a pointer to a socket buffer, 90/* wrapper around a pointer to a socket buffer,
95 * so a DMA handle can be stored along with the buffer */ 91 * so a DMA handle can be stored along with the buffer */
96struct ixgbe_tx_buffer { 92struct ixgbe_tx_buffer {
@@ -142,8 +138,6 @@ struct ixgbe_ring {
142 /* cpu for tx queue */ 138 /* cpu for tx queue */
143 int cpu; 139 int cpu;
144#endif 140#endif
145 struct net_lro_mgr lro_mgr;
146 bool lro_used;
147 struct ixgbe_queue_stats stats; 141 struct ixgbe_queue_stats stats;
148 u16 v_idx; /* maps directly to the index for this ring in the hardware 142 u16 v_idx; /* maps directly to the index for this ring in the hardware
149 * vector array, can also be used for finding the bit in EICR 143 * vector array, can also be used for finding the bit in EICR
@@ -301,9 +295,6 @@ struct ixgbe_adapter {
301 295
302 unsigned long state; 296 unsigned long state;
303 u64 tx_busy; 297 u64 tx_busy;
304 u64 lro_aggregated;
305 u64 lro_flushed;
306 u64 lro_no_desc;
307 unsigned int tx_ring_count; 298 unsigned int tx_ring_count;
308 unsigned int rx_ring_count; 299 unsigned int rx_ring_count;
309 300
diff --git a/drivers/net/ixgbe/ixgbe_ethtool.c b/drivers/net/ixgbe/ixgbe_ethtool.c
index 67f87a79154d..4f6b5dfc78a2 100644
--- a/drivers/net/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ixgbe/ixgbe_ethtool.c
@@ -89,8 +89,6 @@ static struct ixgbe_stats ixgbe_gstrings_stats[] = {
89 {"rx_header_split", IXGBE_STAT(rx_hdr_split)}, 89 {"rx_header_split", IXGBE_STAT(rx_hdr_split)},
90 {"alloc_rx_page_failed", IXGBE_STAT(alloc_rx_page_failed)}, 90 {"alloc_rx_page_failed", IXGBE_STAT(alloc_rx_page_failed)},
91 {"alloc_rx_buff_failed", IXGBE_STAT(alloc_rx_buff_failed)}, 91 {"alloc_rx_buff_failed", IXGBE_STAT(alloc_rx_buff_failed)},
92 {"lro_aggregated", IXGBE_STAT(lro_aggregated)},
93 {"lro_flushed", IXGBE_STAT(lro_flushed)},
94}; 92};
95 93
96#define IXGBE_QUEUE_STATS_LEN \ 94#define IXGBE_QUEUE_STATS_LEN \
@@ -808,15 +806,6 @@ static void ixgbe_get_ethtool_stats(struct net_device *netdev,
808 int stat_count = sizeof(struct ixgbe_queue_stats) / sizeof(u64); 806 int stat_count = sizeof(struct ixgbe_queue_stats) / sizeof(u64);
809 int j, k; 807 int j, k;
810 int i; 808 int i;
811 u64 aggregated = 0, flushed = 0, no_desc = 0;
812 for (i = 0; i < adapter->num_rx_queues; i++) {
813 aggregated += adapter->rx_ring[i].lro_mgr.stats.aggregated;
814 flushed += adapter->rx_ring[i].lro_mgr.stats.flushed;
815 no_desc += adapter->rx_ring[i].lro_mgr.stats.no_desc;
816 }
817 adapter->lro_aggregated = aggregated;
818 adapter->lro_flushed = flushed;
819 adapter->lro_no_desc = no_desc;
820 809
821 ixgbe_update_stats(adapter); 810 ixgbe_update_stats(adapter);
822 for (i = 0; i < IXGBE_GLOBAL_STATS_LEN; i++) { 811 for (i = 0; i < IXGBE_GLOBAL_STATS_LEN; i++) {
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index d2f4d5f508b7..f7b592eff68e 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -403,23 +403,20 @@ static int __ixgbe_notify_dca(struct device *dev, void *data)
403 * @rx_ring: rx descriptor ring (for a specific queue) to setup 403 * @rx_ring: rx descriptor ring (for a specific queue) to setup
404 * @rx_desc: rx descriptor 404 * @rx_desc: rx descriptor
405 **/ 405 **/
406static void ixgbe_receive_skb(struct ixgbe_adapter *adapter, 406static void ixgbe_receive_skb(struct ixgbe_q_vector *q_vector,
407 struct sk_buff *skb, u8 status, 407 struct sk_buff *skb, u8 status,
408 struct ixgbe_ring *ring,
409 union ixgbe_adv_rx_desc *rx_desc) 408 union ixgbe_adv_rx_desc *rx_desc)
410{ 409{
410 struct ixgbe_adapter *adapter = q_vector->adapter;
411 struct napi_struct *napi = &q_vector->napi;
411 bool is_vlan = (status & IXGBE_RXD_STAT_VP); 412 bool is_vlan = (status & IXGBE_RXD_STAT_VP);
412 u16 tag = le16_to_cpu(rx_desc->wb.upper.vlan); 413 u16 tag = le16_to_cpu(rx_desc->wb.upper.vlan);
413 414
414 if (adapter->netdev->features & NETIF_F_LRO && 415 if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
415 skb->ip_summed == CHECKSUM_UNNECESSARY) {
416 if (adapter->vlgrp && is_vlan && (tag != 0)) 416 if (adapter->vlgrp && is_vlan && (tag != 0))
417 lro_vlan_hwaccel_receive_skb(&ring->lro_mgr, skb, 417 vlan_gro_receive(napi, adapter->vlgrp, tag, skb);
418 adapter->vlgrp, tag,
419 rx_desc);
420 else 418 else
421 lro_receive_skb(&ring->lro_mgr, skb, rx_desc); 419 napi_gro_receive(napi, skb);
422 ring->lro_used = true;
423 } else { 420 } else {
424 if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL)) { 421 if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL)) {
425 if (adapter->vlgrp && is_vlan && (tag != 0)) 422 if (adapter->vlgrp && is_vlan && (tag != 0))
@@ -574,10 +571,11 @@ static inline u16 ixgbe_get_pkt_info(union ixgbe_adv_rx_desc *rx_desc)
574 return rx_desc->wb.lower.lo_dword.hs_rss.pkt_info; 571 return rx_desc->wb.lower.lo_dword.hs_rss.pkt_info;
575} 572}
576 573
577static bool ixgbe_clean_rx_irq(struct ixgbe_adapter *adapter, 574static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
578 struct ixgbe_ring *rx_ring, 575 struct ixgbe_ring *rx_ring,
579 int *work_done, int work_to_do) 576 int *work_done, int work_to_do)
580{ 577{
578 struct ixgbe_adapter *adapter = q_vector->adapter;
581 struct pci_dev *pdev = adapter->pdev; 579 struct pci_dev *pdev = adapter->pdev;
582 union ixgbe_adv_rx_desc *rx_desc, *next_rxd; 580 union ixgbe_adv_rx_desc *rx_desc, *next_rxd;
583 struct ixgbe_rx_buffer *rx_buffer_info, *next_buffer; 581 struct ixgbe_rx_buffer *rx_buffer_info, *next_buffer;
@@ -678,7 +676,7 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_adapter *adapter,
678 total_rx_packets++; 676 total_rx_packets++;
679 677
680 skb->protocol = eth_type_trans(skb, adapter->netdev); 678 skb->protocol = eth_type_trans(skb, adapter->netdev);
681 ixgbe_receive_skb(adapter, skb, staterr, rx_ring, rx_desc); 679 ixgbe_receive_skb(q_vector, skb, staterr, rx_desc);
682 680
683next_desc: 681next_desc:
684 rx_desc->wb.upper.status_error = 0; 682 rx_desc->wb.upper.status_error = 0;
@@ -696,11 +694,6 @@ next_desc:
696 staterr = le32_to_cpu(rx_desc->wb.upper.status_error); 694 staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
697 } 695 }
698 696
699 if (rx_ring->lro_used) {
700 lro_flush_all(&rx_ring->lro_mgr);
701 rx_ring->lro_used = false;
702 }
703
704 rx_ring->next_to_clean = i; 697 rx_ring->next_to_clean = i;
705 cleaned_count = IXGBE_DESC_UNUSED(rx_ring); 698 cleaned_count = IXGBE_DESC_UNUSED(rx_ring);
706 699
@@ -1015,7 +1008,7 @@ static irqreturn_t ixgbe_msix_clean_rx(int irq, void *data)
1015 rx_ring = &(adapter->rx_ring[r_idx]); 1008 rx_ring = &(adapter->rx_ring[r_idx]);
1016 /* disable interrupts on this vector only */ 1009 /* disable interrupts on this vector only */
1017 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, rx_ring->v_idx); 1010 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, rx_ring->v_idx);
1018 netif_rx_schedule(&q_vector->napi); 1011 napi_schedule(&q_vector->napi);
1019 1012
1020 return IRQ_HANDLED; 1013 return IRQ_HANDLED;
1021} 1014}
@@ -1052,11 +1045,11 @@ static int ixgbe_clean_rxonly(struct napi_struct *napi, int budget)
1052 ixgbe_update_rx_dca(adapter, rx_ring); 1045 ixgbe_update_rx_dca(adapter, rx_ring);
1053#endif 1046#endif
1054 1047
1055 ixgbe_clean_rx_irq(adapter, rx_ring, &work_done, budget); 1048 ixgbe_clean_rx_irq(q_vector, rx_ring, &work_done, budget);
1056 1049
1057 /* If all Rx work done, exit the polling mode */ 1050 /* If all Rx work done, exit the polling mode */
1058 if (work_done < budget) { 1051 if (work_done < budget) {
1059 netif_rx_complete(napi); 1052 napi_complete(napi);
1060 if (adapter->itr_setting & 3) 1053 if (adapter->itr_setting & 3)
1061 ixgbe_set_itr_msix(q_vector); 1054 ixgbe_set_itr_msix(q_vector);
1062 if (!test_bit(__IXGBE_DOWN, &adapter->state)) 1055 if (!test_bit(__IXGBE_DOWN, &adapter->state))
@@ -1095,7 +1088,7 @@ static int ixgbe_clean_rxonly_many(struct napi_struct *napi, int budget)
1095 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) 1088 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
1096 ixgbe_update_rx_dca(adapter, rx_ring); 1089 ixgbe_update_rx_dca(adapter, rx_ring);
1097#endif 1090#endif
1098 ixgbe_clean_rx_irq(adapter, rx_ring, &work_done, budget); 1091 ixgbe_clean_rx_irq(q_vector, rx_ring, &work_done, budget);
1099 enable_mask |= rx_ring->v_idx; 1092 enable_mask |= rx_ring->v_idx;
1100 r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues, 1093 r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
1101 r_idx + 1); 1094 r_idx + 1);
@@ -1105,7 +1098,7 @@ static int ixgbe_clean_rxonly_many(struct napi_struct *napi, int budget)
1105 rx_ring = &(adapter->rx_ring[r_idx]); 1098 rx_ring = &(adapter->rx_ring[r_idx]);
1106 /* If all Rx work done, exit the polling mode */ 1099 /* If all Rx work done, exit the polling mode */
1107 if (work_done < budget) { 1100 if (work_done < budget) {
1108 netif_rx_complete(napi); 1101 napi_complete(napi);
1109 if (adapter->itr_setting & 3) 1102 if (adapter->itr_setting & 3)
1110 ixgbe_set_itr_msix(q_vector); 1103 ixgbe_set_itr_msix(q_vector);
1111 if (!test_bit(__IXGBE_DOWN, &adapter->state)) 1104 if (!test_bit(__IXGBE_DOWN, &adapter->state))
@@ -1381,13 +1374,13 @@ static irqreturn_t ixgbe_intr(int irq, void *data)
1381 1374
1382 ixgbe_check_fan_failure(adapter, eicr); 1375 ixgbe_check_fan_failure(adapter, eicr);
1383 1376
1384 if (netif_rx_schedule_prep(&adapter->q_vector[0].napi)) { 1377 if (napi_schedule_prep(&adapter->q_vector[0].napi)) {
1385 adapter->tx_ring[0].total_packets = 0; 1378 adapter->tx_ring[0].total_packets = 0;
1386 adapter->tx_ring[0].total_bytes = 0; 1379 adapter->tx_ring[0].total_bytes = 0;
1387 adapter->rx_ring[0].total_packets = 0; 1380 adapter->rx_ring[0].total_packets = 0;
1388 adapter->rx_ring[0].total_bytes = 0; 1381 adapter->rx_ring[0].total_bytes = 0;
1389 /* would disable interrupts here but EIAM disabled it */ 1382 /* would disable interrupts here but EIAM disabled it */
1390 __netif_rx_schedule(&adapter->q_vector[0].napi); 1383 __napi_schedule(&adapter->q_vector[0].napi);
1391 } 1384 }
1392 1385
1393 return IRQ_HANDLED; 1386 return IRQ_HANDLED;
@@ -1568,33 +1561,6 @@ static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter, int index)
1568 IXGBE_WRITE_REG(&adapter->hw, IXGBE_SRRCTL(index), srrctl); 1561 IXGBE_WRITE_REG(&adapter->hw, IXGBE_SRRCTL(index), srrctl);
1569} 1562}
1570 1563
1571/**
1572 * ixgbe_get_skb_hdr - helper function for LRO header processing
1573 * @skb: pointer to sk_buff to be added to LRO packet
1574 * @iphdr: pointer to ip header structure
1575 * @tcph: pointer to tcp header structure
1576 * @hdr_flags: pointer to header flags
1577 * @priv: private data
1578 **/
1579static int ixgbe_get_skb_hdr(struct sk_buff *skb, void **iphdr, void **tcph,
1580 u64 *hdr_flags, void *priv)
1581{
1582 union ixgbe_adv_rx_desc *rx_desc = priv;
1583
1584 /* Verify that this is a valid IPv4 TCP packet */
1585 if (!((ixgbe_get_pkt_info(rx_desc) & IXGBE_RXDADV_PKTTYPE_IPV4) &&
1586 (ixgbe_get_pkt_info(rx_desc) & IXGBE_RXDADV_PKTTYPE_TCP)))
1587 return -1;
1588
1589 /* Set network headers */
1590 skb_reset_network_header(skb);
1591 skb_set_transport_header(skb, ip_hdrlen(skb));
1592 *iphdr = ip_hdr(skb);
1593 *tcph = tcp_hdr(skb);
1594 *hdr_flags = LRO_IPV4 | LRO_TCP;
1595 return 0;
1596}
1597
1598#define PAGE_USE_COUNT(S) (((S) >> PAGE_SHIFT) + \ 1564#define PAGE_USE_COUNT(S) (((S) >> PAGE_SHIFT) + \
1599 (((S) & (PAGE_SIZE - 1)) ? 1 : 0)) 1565 (((S) & (PAGE_SIZE - 1)) ? 1 : 0))
1600 1566
@@ -1666,16 +1632,6 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
1666 adapter->rx_ring[i].head = IXGBE_RDH(j); 1632 adapter->rx_ring[i].head = IXGBE_RDH(j);
1667 adapter->rx_ring[i].tail = IXGBE_RDT(j); 1633 adapter->rx_ring[i].tail = IXGBE_RDT(j);
1668 adapter->rx_ring[i].rx_buf_len = rx_buf_len; 1634 adapter->rx_ring[i].rx_buf_len = rx_buf_len;
1669 /* Intitial LRO Settings */
1670 adapter->rx_ring[i].lro_mgr.max_aggr = IXGBE_MAX_LRO_AGGREGATE;
1671 adapter->rx_ring[i].lro_mgr.max_desc = IXGBE_MAX_LRO_DESCRIPTORS;
1672 adapter->rx_ring[i].lro_mgr.get_skb_header = ixgbe_get_skb_hdr;
1673 adapter->rx_ring[i].lro_mgr.features = LRO_F_EXTRACT_VLAN_ID;
1674 if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL))
1675 adapter->rx_ring[i].lro_mgr.features |= LRO_F_NAPI;
1676 adapter->rx_ring[i].lro_mgr.dev = adapter->netdev;
1677 adapter->rx_ring[i].lro_mgr.ip_summed = CHECKSUM_UNNECESSARY;
1678 adapter->rx_ring[i].lro_mgr.ip_summed_aggr = CHECKSUM_UNNECESSARY;
1679 1635
1680 ixgbe_configure_srrctl(adapter, j); 1636 ixgbe_configure_srrctl(adapter, j);
1681 } 1637 }
@@ -2310,14 +2266,14 @@ static int ixgbe_poll(struct napi_struct *napi, int budget)
2310#endif 2266#endif
2311 2267
2312 tx_cleaned = ixgbe_clean_tx_irq(adapter, adapter->tx_ring); 2268 tx_cleaned = ixgbe_clean_tx_irq(adapter, adapter->tx_ring);
2313 ixgbe_clean_rx_irq(adapter, adapter->rx_ring, &work_done, budget); 2269 ixgbe_clean_rx_irq(q_vector, adapter->rx_ring, &work_done, budget);
2314 2270
2315 if (tx_cleaned) 2271 if (tx_cleaned)
2316 work_done = budget; 2272 work_done = budget;
2317 2273
2318 /* If budget not fully consumed, exit the polling mode */ 2274 /* If budget not fully consumed, exit the polling mode */
2319 if (work_done < budget) { 2275 if (work_done < budget) {
2320 netif_rx_complete(napi); 2276 napi_complete(napi);
2321 if (adapter->itr_setting & 3) 2277 if (adapter->itr_setting & 3)
2322 ixgbe_set_itr(adapter); 2278 ixgbe_set_itr(adapter);
2323 if (!test_bit(__IXGBE_DOWN, &adapter->state)) 2279 if (!test_bit(__IXGBE_DOWN, &adapter->state))
@@ -2926,12 +2882,6 @@ int ixgbe_setup_rx_resources(struct ixgbe_adapter *adapter,
2926 struct pci_dev *pdev = adapter->pdev; 2882 struct pci_dev *pdev = adapter->pdev;
2927 int size; 2883 int size;
2928 2884
2929 size = sizeof(struct net_lro_desc) * IXGBE_MAX_LRO_DESCRIPTORS;
2930 rx_ring->lro_mgr.lro_arr = vmalloc(size);
2931 if (!rx_ring->lro_mgr.lro_arr)
2932 return -ENOMEM;
2933 memset(rx_ring->lro_mgr.lro_arr, 0, size);
2934
2935 size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count; 2885 size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count;
2936 rx_ring->rx_buffer_info = vmalloc(size); 2886 rx_ring->rx_buffer_info = vmalloc(size);
2937 if (!rx_ring->rx_buffer_info) { 2887 if (!rx_ring->rx_buffer_info) {
@@ -2960,8 +2910,6 @@ int ixgbe_setup_rx_resources(struct ixgbe_adapter *adapter,
2960 return 0; 2910 return 0;
2961 2911
2962alloc_failed: 2912alloc_failed:
2963 vfree(rx_ring->lro_mgr.lro_arr);
2964 rx_ring->lro_mgr.lro_arr = NULL;
2965 return -ENOMEM; 2913 return -ENOMEM;
2966} 2914}
2967 2915
@@ -3039,9 +2987,6 @@ void ixgbe_free_rx_resources(struct ixgbe_adapter *adapter,
3039{ 2987{
3040 struct pci_dev *pdev = adapter->pdev; 2988 struct pci_dev *pdev = adapter->pdev;
3041 2989
3042 vfree(rx_ring->lro_mgr.lro_arr);
3043 rx_ring->lro_mgr.lro_arr = NULL;
3044
3045 ixgbe_clean_rx_ring(adapter, rx_ring); 2990 ixgbe_clean_rx_ring(adapter, rx_ring);
3046 2991
3047 vfree(rx_ring->rx_buffer_info); 2992 vfree(rx_ring->rx_buffer_info);
@@ -4141,7 +4086,7 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
4141 netdev->features |= NETIF_F_IPV6_CSUM; 4086 netdev->features |= NETIF_F_IPV6_CSUM;
4142 netdev->features |= NETIF_F_TSO; 4087 netdev->features |= NETIF_F_TSO;
4143 netdev->features |= NETIF_F_TSO6; 4088 netdev->features |= NETIF_F_TSO6;
4144 netdev->features |= NETIF_F_LRO; 4089 netdev->features |= NETIF_F_GRO;
4145 4090
4146 netdev->vlan_features |= NETIF_F_TSO; 4091 netdev->vlan_features |= NETIF_F_TSO;
4147 netdev->vlan_features |= NETIF_F_TSO6; 4092 netdev->vlan_features |= NETIF_F_TSO6;
diff --git a/drivers/net/ixp2000/ixpdev.c b/drivers/net/ixp2000/ixpdev.c
index 014745720560..d3bf2f017cc2 100644
--- a/drivers/net/ixp2000/ixpdev.c
+++ b/drivers/net/ixp2000/ixpdev.c
@@ -141,7 +141,7 @@ static int ixpdev_poll(struct napi_struct *napi, int budget)
141 break; 141 break;
142 } while (ixp2000_reg_read(IXP2000_IRQ_THD_RAW_STATUS_A_0) & 0x00ff); 142 } while (ixp2000_reg_read(IXP2000_IRQ_THD_RAW_STATUS_A_0) & 0x00ff);
143 143
144 netif_rx_complete(napi); 144 napi_complete(napi);
145 ixp2000_reg_write(IXP2000_IRQ_THD_ENABLE_SET_A_0, 0x00ff); 145 ixp2000_reg_write(IXP2000_IRQ_THD_ENABLE_SET_A_0, 0x00ff);
146 146
147 return rx; 147 return rx;
@@ -204,7 +204,7 @@ static irqreturn_t ixpdev_interrupt(int irq, void *dev_id)
204 204
205 ixp2000_reg_wrb(IXP2000_IRQ_THD_ENABLE_CLEAR_A_0, 0x00ff); 205 ixp2000_reg_wrb(IXP2000_IRQ_THD_ENABLE_CLEAR_A_0, 0x00ff);
206 if (likely(napi_schedule_prep(&ip->napi))) { 206 if (likely(napi_schedule_prep(&ip->napi))) {
207 __netif_rx_schedule(&ip->napi); 207 __napi_schedule(&ip->napi);
208 } else { 208 } else {
209 printk(KERN_CRIT "ixp2000: irq while polling!!\n"); 209 printk(KERN_CRIT "ixp2000: irq while polling!!\n");
210 } 210 }
diff --git a/drivers/net/jme.h b/drivers/net/jme.h
index 5154411b5e6b..e321c678b11c 100644
--- a/drivers/net/jme.h
+++ b/drivers/net/jme.h
@@ -398,15 +398,15 @@ struct jme_ring {
398#define JME_NAPI_WEIGHT(w) int w 398#define JME_NAPI_WEIGHT(w) int w
399#define JME_NAPI_WEIGHT_VAL(w) w 399#define JME_NAPI_WEIGHT_VAL(w) w
400#define JME_NAPI_WEIGHT_SET(w, r) 400#define JME_NAPI_WEIGHT_SET(w, r)
401#define JME_RX_COMPLETE(dev, napis) netif_rx_complete(napis) 401#define JME_RX_COMPLETE(dev, napis) napi_complete(napis)
402#define JME_NAPI_ENABLE(priv) napi_enable(&priv->napi); 402#define JME_NAPI_ENABLE(priv) napi_enable(&priv->napi);
403#define JME_NAPI_DISABLE(priv) \ 403#define JME_NAPI_DISABLE(priv) \
404 if (!napi_disable_pending(&priv->napi)) \ 404 if (!napi_disable_pending(&priv->napi)) \
405 napi_disable(&priv->napi); 405 napi_disable(&priv->napi);
406#define JME_RX_SCHEDULE_PREP(priv) \ 406#define JME_RX_SCHEDULE_PREP(priv) \
407 netif_rx_schedule_prep(&priv->napi) 407 napi_schedule_prep(&priv->napi)
408#define JME_RX_SCHEDULE(priv) \ 408#define JME_RX_SCHEDULE(priv) \
409 __netif_rx_schedule(&priv->napi); 409 __napi_schedule(&priv->napi);
410 410
411/* 411/*
412 * Jmac Adapter Private data 412 * Jmac Adapter Private data
diff --git a/drivers/net/korina.c b/drivers/net/korina.c
index 75010cac76ac..38d6649a29c4 100644
--- a/drivers/net/korina.c
+++ b/drivers/net/korina.c
@@ -334,7 +334,7 @@ static irqreturn_t korina_rx_dma_interrupt(int irq, void *dev_id)
334 DMA_STAT_HALT | DMA_STAT_ERR), 334 DMA_STAT_HALT | DMA_STAT_ERR),
335 &lp->rx_dma_regs->dmasm); 335 &lp->rx_dma_regs->dmasm);
336 336
337 netif_rx_schedule(&lp->napi); 337 napi_schedule(&lp->napi);
338 338
339 if (dmas & DMA_STAT_ERR) 339 if (dmas & DMA_STAT_ERR)
340 printk(KERN_ERR DRV_NAME "%s: DMA error\n", dev->name); 340 printk(KERN_ERR DRV_NAME "%s: DMA error\n", dev->name);
@@ -468,7 +468,7 @@ static int korina_poll(struct napi_struct *napi, int budget)
468 468
469 work_done = korina_rx(dev, budget); 469 work_done = korina_rx(dev, budget);
470 if (work_done < budget) { 470 if (work_done < budget) {
471 netif_rx_complete(napi); 471 napi_complete(napi);
472 472
473 writel(readl(&lp->rx_dma_regs->dmasm) & 473 writel(readl(&lp->rx_dma_regs->dmasm) &
474 ~(DMA_STAT_DONE | DMA_STAT_HALT | DMA_STAT_ERR), 474 ~(DMA_STAT_DONE | DMA_STAT_HALT | DMA_STAT_ERR),
diff --git a/drivers/net/macb.c b/drivers/net/macb.c
index f6c4936e2fa8..dc33d51213d7 100644
--- a/drivers/net/macb.c
+++ b/drivers/net/macb.c
@@ -527,7 +527,7 @@ static int macb_poll(struct napi_struct *napi, int budget)
527 * this function was called last time, and no packets 527 * this function was called last time, and no packets
528 * have been received since. 528 * have been received since.
529 */ 529 */
530 netif_rx_complete(napi); 530 napi_complete(napi);
531 goto out; 531 goto out;
532 } 532 }
533 533
@@ -538,13 +538,13 @@ static int macb_poll(struct napi_struct *napi, int budget)
538 dev_warn(&bp->pdev->dev, 538 dev_warn(&bp->pdev->dev,
539 "No RX buffers complete, status = %02lx\n", 539 "No RX buffers complete, status = %02lx\n",
540 (unsigned long)status); 540 (unsigned long)status);
541 netif_rx_complete(napi); 541 napi_complete(napi);
542 goto out; 542 goto out;
543 } 543 }
544 544
545 work_done = macb_rx(bp, budget); 545 work_done = macb_rx(bp, budget);
546 if (work_done < budget) 546 if (work_done < budget)
547 netif_rx_complete(napi); 547 napi_complete(napi);
548 548
549 /* 549 /*
550 * We've done what we can to clean the buffers. Make sure we 550 * We've done what we can to clean the buffers. Make sure we
@@ -579,7 +579,7 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id)
579 } 579 }
580 580
581 if (status & MACB_RX_INT_FLAGS) { 581 if (status & MACB_RX_INT_FLAGS) {
582 if (netif_rx_schedule_prep(&bp->napi)) { 582 if (napi_schedule_prep(&bp->napi)) {
583 /* 583 /*
584 * There's no point taking any more interrupts 584 * There's no point taking any more interrupts
585 * until we have processed the buffers 585 * until we have processed the buffers
@@ -587,7 +587,7 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id)
587 macb_writel(bp, IDR, MACB_RX_INT_FLAGS); 587 macb_writel(bp, IDR, MACB_RX_INT_FLAGS);
588 dev_dbg(&bp->pdev->dev, 588 dev_dbg(&bp->pdev->dev,
589 "scheduling RX softirq\n"); 589 "scheduling RX softirq\n");
590 __netif_rx_schedule(&bp->napi); 590 __napi_schedule(&bp->napi);
591 } 591 }
592 } 592 }
593 593
diff --git a/drivers/net/mlx4/en_rx.c b/drivers/net/mlx4/en_rx.c
index c61b0bdca1a4..ac55ebd2f146 100644
--- a/drivers/net/mlx4/en_rx.c
+++ b/drivers/net/mlx4/en_rx.c
@@ -814,7 +814,7 @@ void mlx4_en_rx_irq(struct mlx4_cq *mcq)
814 struct mlx4_en_priv *priv = netdev_priv(cq->dev); 814 struct mlx4_en_priv *priv = netdev_priv(cq->dev);
815 815
816 if (priv->port_up) 816 if (priv->port_up)
817 netif_rx_schedule(&cq->napi); 817 napi_schedule(&cq->napi);
818 else 818 else
819 mlx4_en_arm_cq(priv, cq); 819 mlx4_en_arm_cq(priv, cq);
820} 820}
@@ -834,7 +834,7 @@ int mlx4_en_poll_rx_cq(struct napi_struct *napi, int budget)
834 INC_PERF_COUNTER(priv->pstats.napi_quota); 834 INC_PERF_COUNTER(priv->pstats.napi_quota);
835 else { 835 else {
836 /* Done for now */ 836 /* Done for now */
837 netif_rx_complete(napi); 837 napi_complete(napi);
838 mlx4_en_arm_cq(priv, cq); 838 mlx4_en_arm_cq(priv, cq);
839 } 839 }
840 return done; 840 return done;
diff --git a/drivers/net/myri10ge/myri10ge.c b/drivers/net/myri10ge/myri10ge.c
index e9c1296b267e..2dacb8852dc3 100644
--- a/drivers/net/myri10ge/myri10ge.c
+++ b/drivers/net/myri10ge/myri10ge.c
@@ -1514,7 +1514,7 @@ static int myri10ge_poll(struct napi_struct *napi, int budget)
1514 work_done = myri10ge_clean_rx_done(ss, budget); 1514 work_done = myri10ge_clean_rx_done(ss, budget);
1515 1515
1516 if (work_done < budget) { 1516 if (work_done < budget) {
1517 netif_rx_complete(napi); 1517 napi_complete(napi);
1518 put_be32(htonl(3), ss->irq_claim); 1518 put_be32(htonl(3), ss->irq_claim);
1519 } 1519 }
1520 return work_done; 1520 return work_done;
@@ -1532,7 +1532,7 @@ static irqreturn_t myri10ge_intr(int irq, void *arg)
1532 /* an interrupt on a non-zero receive-only slice is implicitly 1532 /* an interrupt on a non-zero receive-only slice is implicitly
1533 * valid since MSI-X irqs are not shared */ 1533 * valid since MSI-X irqs are not shared */
1534 if ((mgp->dev->real_num_tx_queues == 1) && (ss != mgp->ss)) { 1534 if ((mgp->dev->real_num_tx_queues == 1) && (ss != mgp->ss)) {
1535 netif_rx_schedule(&ss->napi); 1535 napi_schedule(&ss->napi);
1536 return (IRQ_HANDLED); 1536 return (IRQ_HANDLED);
1537 } 1537 }
1538 1538
@@ -1543,7 +1543,7 @@ static irqreturn_t myri10ge_intr(int irq, void *arg)
1543 /* low bit indicates receives are present, so schedule 1543 /* low bit indicates receives are present, so schedule
1544 * napi poll handler */ 1544 * napi poll handler */
1545 if (stats->valid & 1) 1545 if (stats->valid & 1)
1546 netif_rx_schedule(&ss->napi); 1546 napi_schedule(&ss->napi);
1547 1547
1548 if (!mgp->msi_enabled && !mgp->msix_enabled) { 1548 if (!mgp->msi_enabled && !mgp->msix_enabled) {
1549 put_be32(0, mgp->irq_deassert); 1549 put_be32(0, mgp->irq_deassert);
diff --git a/drivers/net/natsemi.c b/drivers/net/natsemi.c
index c5dec54251bf..c23a58624a33 100644
--- a/drivers/net/natsemi.c
+++ b/drivers/net/natsemi.c
@@ -2198,10 +2198,10 @@ static irqreturn_t intr_handler(int irq, void *dev_instance)
2198 2198
2199 prefetch(&np->rx_skbuff[np->cur_rx % RX_RING_SIZE]); 2199 prefetch(&np->rx_skbuff[np->cur_rx % RX_RING_SIZE]);
2200 2200
2201 if (netif_rx_schedule_prep(&np->napi)) { 2201 if (napi_schedule_prep(&np->napi)) {
2202 /* Disable interrupts and register for poll */ 2202 /* Disable interrupts and register for poll */
2203 natsemi_irq_disable(dev); 2203 natsemi_irq_disable(dev);
2204 __netif_rx_schedule(&np->napi); 2204 __napi_schedule(&np->napi);
2205 } else 2205 } else
2206 printk(KERN_WARNING 2206 printk(KERN_WARNING
2207 "%s: Ignoring interrupt, status %#08x, mask %#08x.\n", 2207 "%s: Ignoring interrupt, status %#08x, mask %#08x.\n",
@@ -2253,7 +2253,7 @@ static int natsemi_poll(struct napi_struct *napi, int budget)
2253 np->intr_status = readl(ioaddr + IntrStatus); 2253 np->intr_status = readl(ioaddr + IntrStatus);
2254 } while (np->intr_status); 2254 } while (np->intr_status);
2255 2255
2256 netif_rx_complete(napi); 2256 napi_complete(napi);
2257 2257
2258 /* Reenable interrupts providing nothing is trying to shut 2258 /* Reenable interrupts providing nothing is trying to shut
2259 * the chip down. */ 2259 * the chip down. */
diff --git a/drivers/net/netxen/netxen_nic_main.c b/drivers/net/netxen/netxen_nic_main.c
index 645d384fe87e..cc06cc5429fe 100644
--- a/drivers/net/netxen/netxen_nic_main.c
+++ b/drivers/net/netxen/netxen_nic_main.c
@@ -1640,7 +1640,7 @@ static int netxen_nic_poll(struct napi_struct *napi, int budget)
1640 } 1640 }
1641 1641
1642 if ((work_done < budget) && tx_complete) { 1642 if ((work_done < budget) && tx_complete) {
1643 netif_rx_complete(&adapter->napi); 1643 napi_complete(&adapter->napi);
1644 netxen_nic_enable_int(adapter); 1644 netxen_nic_enable_int(adapter);
1645 } 1645 }
1646 1646
diff --git a/drivers/net/niu.c b/drivers/net/niu.c
index 0c0b752315ca..4a5a089fa301 100644
--- a/drivers/net/niu.c
+++ b/drivers/net/niu.c
@@ -3669,7 +3669,7 @@ static int niu_poll(struct napi_struct *napi, int budget)
3669 work_done = niu_poll_core(np, lp, budget); 3669 work_done = niu_poll_core(np, lp, budget);
3670 3670
3671 if (work_done < budget) { 3671 if (work_done < budget) {
3672 netif_rx_complete(napi); 3672 napi_complete(napi);
3673 niu_ldg_rearm(np, lp, 1); 3673 niu_ldg_rearm(np, lp, 1);
3674 } 3674 }
3675 return work_done; 3675 return work_done;
@@ -4088,12 +4088,12 @@ static void __niu_fastpath_interrupt(struct niu *np, int ldg, u64 v0)
4088static void niu_schedule_napi(struct niu *np, struct niu_ldg *lp, 4088static void niu_schedule_napi(struct niu *np, struct niu_ldg *lp,
4089 u64 v0, u64 v1, u64 v2) 4089 u64 v0, u64 v1, u64 v2)
4090{ 4090{
4091 if (likely(netif_rx_schedule_prep(&lp->napi))) { 4091 if (likely(napi_schedule_prep(&lp->napi))) {
4092 lp->v0 = v0; 4092 lp->v0 = v0;
4093 lp->v1 = v1; 4093 lp->v1 = v1;
4094 lp->v2 = v2; 4094 lp->v2 = v2;
4095 __niu_fastpath_interrupt(np, lp->ldg_num, v0); 4095 __niu_fastpath_interrupt(np, lp->ldg_num, v0);
4096 __netif_rx_schedule(&lp->napi); 4096 __napi_schedule(&lp->napi);
4097 } 4097 }
4098} 4098}
4099 4099
diff --git a/drivers/net/pasemi_mac.c b/drivers/net/pasemi_mac.c
index d0349e7d73ea..5eeb5a87b738 100644
--- a/drivers/net/pasemi_mac.c
+++ b/drivers/net/pasemi_mac.c
@@ -970,7 +970,7 @@ static irqreturn_t pasemi_mac_rx_intr(int irq, void *data)
970 if (*chan->status & PAS_STATUS_ERROR) 970 if (*chan->status & PAS_STATUS_ERROR)
971 reg |= PAS_IOB_DMA_RXCH_RESET_DINTC; 971 reg |= PAS_IOB_DMA_RXCH_RESET_DINTC;
972 972
973 netif_rx_schedule(&mac->napi); 973 napi_schedule(&mac->napi);
974 974
975 write_iob_reg(PAS_IOB_DMA_RXCH_RESET(chan->chno), reg); 975 write_iob_reg(PAS_IOB_DMA_RXCH_RESET(chan->chno), reg);
976 976
@@ -1010,7 +1010,7 @@ static irqreturn_t pasemi_mac_tx_intr(int irq, void *data)
1010 1010
1011 mod_timer(&txring->clean_timer, jiffies + (TX_CLEAN_INTERVAL)*2); 1011 mod_timer(&txring->clean_timer, jiffies + (TX_CLEAN_INTERVAL)*2);
1012 1012
1013 netif_rx_schedule(&mac->napi); 1013 napi_schedule(&mac->napi);
1014 1014
1015 if (reg) 1015 if (reg)
1016 write_iob_reg(PAS_IOB_DMA_TXCH_RESET(chan->chno), reg); 1016 write_iob_reg(PAS_IOB_DMA_TXCH_RESET(chan->chno), reg);
@@ -1639,7 +1639,7 @@ static int pasemi_mac_poll(struct napi_struct *napi, int budget)
1639 pkts = pasemi_mac_clean_rx(rx_ring(mac), budget); 1639 pkts = pasemi_mac_clean_rx(rx_ring(mac), budget);
1640 if (pkts < budget) { 1640 if (pkts < budget) {
1641 /* all done, no more packets present */ 1641 /* all done, no more packets present */
1642 netif_rx_complete(napi); 1642 napi_complete(napi);
1643 1643
1644 pasemi_mac_restart_rx_intr(mac); 1644 pasemi_mac_restart_rx_intr(mac);
1645 pasemi_mac_restart_tx_intr(mac); 1645 pasemi_mac_restart_tx_intr(mac);
diff --git a/drivers/net/pcnet32.c b/drivers/net/pcnet32.c
index 665a4286da39..80124fac65fa 100644
--- a/drivers/net/pcnet32.c
+++ b/drivers/net/pcnet32.c
@@ -1397,7 +1397,7 @@ static int pcnet32_poll(struct napi_struct *napi, int budget)
1397 if (work_done < budget) { 1397 if (work_done < budget) {
1398 spin_lock_irqsave(&lp->lock, flags); 1398 spin_lock_irqsave(&lp->lock, flags);
1399 1399
1400 __netif_rx_complete(napi); 1400 __napi_complete(napi);
1401 1401
1402 /* clear interrupt masks */ 1402 /* clear interrupt masks */
1403 val = lp->a.read_csr(ioaddr, CSR3); 1403 val = lp->a.read_csr(ioaddr, CSR3);
@@ -2592,14 +2592,14 @@ pcnet32_interrupt(int irq, void *dev_id)
2592 dev->name, csr0); 2592 dev->name, csr0);
2593 /* unlike for the lance, there is no restart needed */ 2593 /* unlike for the lance, there is no restart needed */
2594 } 2594 }
2595 if (netif_rx_schedule_prep(&lp->napi)) { 2595 if (napi_schedule_prep(&lp->napi)) {
2596 u16 val; 2596 u16 val;
2597 /* set interrupt masks */ 2597 /* set interrupt masks */
2598 val = lp->a.read_csr(ioaddr, CSR3); 2598 val = lp->a.read_csr(ioaddr, CSR3);
2599 val |= 0x5f00; 2599 val |= 0x5f00;
2600 lp->a.write_csr(ioaddr, CSR3, val); 2600 lp->a.write_csr(ioaddr, CSR3, val);
2601 mmiowb(); 2601 mmiowb();
2602 __netif_rx_schedule(&lp->napi); 2602 __napi_schedule(&lp->napi);
2603 break; 2603 break;
2604 } 2604 }
2605 csr0 = lp->a.read_csr(ioaddr, CSR0); 2605 csr0 = lp->a.read_csr(ioaddr, CSR0);
diff --git a/drivers/net/phy/mdio-gpio.c b/drivers/net/phy/mdio-gpio.c
index a439ebeb4319..3f460c564927 100644
--- a/drivers/net/phy/mdio-gpio.c
+++ b/drivers/net/phy/mdio-gpio.c
@@ -200,16 +200,21 @@ static int __devinit mdio_ofgpio_probe(struct of_device *ofdev,
200{ 200{
201 struct device_node *np = NULL; 201 struct device_node *np = NULL;
202 struct mdio_gpio_platform_data *pdata; 202 struct mdio_gpio_platform_data *pdata;
203 int ret;
203 204
204 pdata = kzalloc(sizeof(*pdata), GFP_KERNEL); 205 pdata = kzalloc(sizeof(*pdata), GFP_KERNEL);
205 if (!pdata) 206 if (!pdata)
206 return -ENOMEM; 207 return -ENOMEM;
207 208
208 pdata->mdc = of_get_gpio(ofdev->node, 0); 209 ret = of_get_gpio(ofdev->node, 0);
209 pdata->mdio = of_get_gpio(ofdev->node, 1); 210 if (ret < 0)
210
211 if (pdata->mdc < 0 || pdata->mdio < 0)
212 goto out_free; 211 goto out_free;
212 pdata->mdc = ret;
213
214 ret = of_get_gpio(ofdev->node, 1);
215 if (ret < 0)
216 goto out_free;
217 pdata->mdio = ret;
213 218
214 while ((np = of_get_next_child(ofdev->node, np))) 219 while ((np = of_get_next_child(ofdev->node, np)))
215 if (!strcmp(np->type, "ethernet-phy")) 220 if (!strcmp(np->type, "ethernet-phy"))
diff --git a/drivers/net/ppp_generic.c b/drivers/net/ppp_generic.c
index 7b2728b8f1b7..4405a76ed3da 100644
--- a/drivers/net/ppp_generic.c
+++ b/drivers/net/ppp_generic.c
@@ -49,6 +49,10 @@
49#include <net/slhc_vj.h> 49#include <net/slhc_vj.h>
50#include <asm/atomic.h> 50#include <asm/atomic.h>
51 51
52#include <linux/nsproxy.h>
53#include <net/net_namespace.h>
54#include <net/netns/generic.h>
55
52#define PPP_VERSION "2.4.2" 56#define PPP_VERSION "2.4.2"
53 57
54/* 58/*
@@ -131,6 +135,7 @@ struct ppp {
131 struct sock_filter *active_filter;/* filter for pkts to reset idle */ 135 struct sock_filter *active_filter;/* filter for pkts to reset idle */
132 unsigned pass_len, active_len; 136 unsigned pass_len, active_len;
133#endif /* CONFIG_PPP_FILTER */ 137#endif /* CONFIG_PPP_FILTER */
138 struct net *ppp_net; /* the net we belong to */
134}; 139};
135 140
136/* 141/*
@@ -155,6 +160,7 @@ struct channel {
155 struct rw_semaphore chan_sem; /* protects `chan' during chan ioctl */ 160 struct rw_semaphore chan_sem; /* protects `chan' during chan ioctl */
156 spinlock_t downl; /* protects `chan', file.xq dequeue */ 161 spinlock_t downl; /* protects `chan', file.xq dequeue */
157 struct ppp *ppp; /* ppp unit we're connected to */ 162 struct ppp *ppp; /* ppp unit we're connected to */
163 struct net *chan_net; /* the net channel belongs to */
158 struct list_head clist; /* link in list of channels per unit */ 164 struct list_head clist; /* link in list of channels per unit */
159 rwlock_t upl; /* protects `ppp' */ 165 rwlock_t upl; /* protects `ppp' */
160#ifdef CONFIG_PPP_MULTILINK 166#ifdef CONFIG_PPP_MULTILINK
@@ -173,26 +179,35 @@ struct channel {
173 * channel.downl. 179 * channel.downl.
174 */ 180 */
175 181
176/*
177 * all_ppp_mutex protects the all_ppp_units mapping.
178 * It also ensures that finding a ppp unit in the all_ppp_units map
179 * and updating its file.refcnt field is atomic.
180 */
181static DEFINE_MUTEX(all_ppp_mutex);
182static atomic_t ppp_unit_count = ATOMIC_INIT(0); 182static atomic_t ppp_unit_count = ATOMIC_INIT(0);
183static DEFINE_IDR(ppp_units_idr);
184
185/*
186 * all_channels_lock protects all_channels and last_channel_index,
187 * and the atomicity of find a channel and updating its file.refcnt
188 * field.
189 */
190static DEFINE_SPINLOCK(all_channels_lock);
191static LIST_HEAD(all_channels);
192static LIST_HEAD(new_channels);
193static int last_channel_index;
194static atomic_t channel_count = ATOMIC_INIT(0); 183static atomic_t channel_count = ATOMIC_INIT(0);
195 184
185/* per-net private data for this module */
186static unsigned int ppp_net_id;
187struct ppp_net {
188 /* units to ppp mapping */
189 struct idr units_idr;
190
191 /*
192 * all_ppp_mutex protects the units_idr mapping.
193 * It also ensures that finding a ppp unit in the units_idr
194 * map and updating its file.refcnt field is atomic.
195 */
196 struct mutex all_ppp_mutex;
197
198 /* channels */
199 struct list_head all_channels;
200 struct list_head new_channels;
201 int last_channel_index;
202
203 /*
204 * all_channels_lock protects all_channels and
205 * last_channel_index, and the atomicity of find
206 * a channel and updating its file.refcnt field.
207 */
208 spinlock_t all_channels_lock;
209};
210
196/* Get the PPP protocol number from a skb */ 211/* Get the PPP protocol number from a skb */
197#define PPP_PROTO(skb) (((skb)->data[0] << 8) + (skb)->data[1]) 212#define PPP_PROTO(skb) (((skb)->data[0] << 8) + (skb)->data[1])
198 213
@@ -216,8 +231,8 @@ static atomic_t channel_count = ATOMIC_INIT(0);
216#define seq_after(a, b) ((s32)((a) - (b)) > 0) 231#define seq_after(a, b) ((s32)((a) - (b)) > 0)
217 232
218/* Prototypes. */ 233/* Prototypes. */
219static int ppp_unattached_ioctl(struct ppp_file *pf, struct file *file, 234static int ppp_unattached_ioctl(struct net *net, struct ppp_file *pf,
220 unsigned int cmd, unsigned long arg); 235 struct file *file, unsigned int cmd, unsigned long arg);
221static void ppp_xmit_process(struct ppp *ppp); 236static void ppp_xmit_process(struct ppp *ppp);
222static void ppp_send_frame(struct ppp *ppp, struct sk_buff *skb); 237static void ppp_send_frame(struct ppp *ppp, struct sk_buff *skb);
223static void ppp_push(struct ppp *ppp); 238static void ppp_push(struct ppp *ppp);
@@ -240,12 +255,12 @@ static void ppp_ccp_peek(struct ppp *ppp, struct sk_buff *skb, int inbound);
240static void ppp_ccp_closed(struct ppp *ppp); 255static void ppp_ccp_closed(struct ppp *ppp);
241static struct compressor *find_compressor(int type); 256static struct compressor *find_compressor(int type);
242static void ppp_get_stats(struct ppp *ppp, struct ppp_stats *st); 257static void ppp_get_stats(struct ppp *ppp, struct ppp_stats *st);
243static struct ppp *ppp_create_interface(int unit, int *retp); 258static struct ppp *ppp_create_interface(struct net *net, int unit, int *retp);
244static void init_ppp_file(struct ppp_file *pf, int kind); 259static void init_ppp_file(struct ppp_file *pf, int kind);
245static void ppp_shutdown_interface(struct ppp *ppp); 260static void ppp_shutdown_interface(struct ppp *ppp);
246static void ppp_destroy_interface(struct ppp *ppp); 261static void ppp_destroy_interface(struct ppp *ppp);
247static struct ppp *ppp_find_unit(int unit); 262static struct ppp *ppp_find_unit(struct ppp_net *pn, int unit);
248static struct channel *ppp_find_channel(int unit); 263static struct channel *ppp_find_channel(struct ppp_net *pn, int unit);
249static int ppp_connect_channel(struct channel *pch, int unit); 264static int ppp_connect_channel(struct channel *pch, int unit);
250static int ppp_disconnect_channel(struct channel *pch); 265static int ppp_disconnect_channel(struct channel *pch);
251static void ppp_destroy_channel(struct channel *pch); 266static void ppp_destroy_channel(struct channel *pch);
@@ -256,6 +271,14 @@ static void *unit_find(struct idr *p, int n);
256 271
257static struct class *ppp_class; 272static struct class *ppp_class;
258 273
274/* per net-namespace data */
275static inline struct ppp_net *ppp_pernet(struct net *net)
276{
277 BUG_ON(!net);
278
279 return net_generic(net, ppp_net_id);
280}
281
259/* Translates a PPP protocol number to a NP index (NP == network protocol) */ 282/* Translates a PPP protocol number to a NP index (NP == network protocol) */
260static inline int proto_to_npindex(int proto) 283static inline int proto_to_npindex(int proto)
261{ 284{
@@ -544,7 +567,8 @@ static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
544 int __user *p = argp; 567 int __user *p = argp;
545 568
546 if (!pf) 569 if (!pf)
547 return ppp_unattached_ioctl(pf, file, cmd, arg); 570 return ppp_unattached_ioctl(current->nsproxy->net_ns,
571 pf, file, cmd, arg);
548 572
549 if (cmd == PPPIOCDETACH) { 573 if (cmd == PPPIOCDETACH) {
550 /* 574 /*
@@ -763,12 +787,13 @@ static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
763 return err; 787 return err;
764} 788}
765 789
766static int ppp_unattached_ioctl(struct ppp_file *pf, struct file *file, 790static int ppp_unattached_ioctl(struct net *net, struct ppp_file *pf,
767 unsigned int cmd, unsigned long arg) 791 struct file *file, unsigned int cmd, unsigned long arg)
768{ 792{
769 int unit, err = -EFAULT; 793 int unit, err = -EFAULT;
770 struct ppp *ppp; 794 struct ppp *ppp;
771 struct channel *chan; 795 struct channel *chan;
796 struct ppp_net *pn;
772 int __user *p = (int __user *)arg; 797 int __user *p = (int __user *)arg;
773 798
774 lock_kernel(); 799 lock_kernel();
@@ -777,7 +802,7 @@ static int ppp_unattached_ioctl(struct ppp_file *pf, struct file *file,
777 /* Create a new ppp unit */ 802 /* Create a new ppp unit */
778 if (get_user(unit, p)) 803 if (get_user(unit, p))
779 break; 804 break;
780 ppp = ppp_create_interface(unit, &err); 805 ppp = ppp_create_interface(net, unit, &err);
781 if (!ppp) 806 if (!ppp)
782 break; 807 break;
783 file->private_data = &ppp->file; 808 file->private_data = &ppp->file;
@@ -792,29 +817,31 @@ static int ppp_unattached_ioctl(struct ppp_file *pf, struct file *file,
792 /* Attach to an existing ppp unit */ 817 /* Attach to an existing ppp unit */
793 if (get_user(unit, p)) 818 if (get_user(unit, p))
794 break; 819 break;
795 mutex_lock(&all_ppp_mutex);
796 err = -ENXIO; 820 err = -ENXIO;
797 ppp = ppp_find_unit(unit); 821 pn = ppp_pernet(net);
822 mutex_lock(&pn->all_ppp_mutex);
823 ppp = ppp_find_unit(pn, unit);
798 if (ppp) { 824 if (ppp) {
799 atomic_inc(&ppp->file.refcnt); 825 atomic_inc(&ppp->file.refcnt);
800 file->private_data = &ppp->file; 826 file->private_data = &ppp->file;
801 err = 0; 827 err = 0;
802 } 828 }
803 mutex_unlock(&all_ppp_mutex); 829 mutex_unlock(&pn->all_ppp_mutex);
804 break; 830 break;
805 831
806 case PPPIOCATTCHAN: 832 case PPPIOCATTCHAN:
807 if (get_user(unit, p)) 833 if (get_user(unit, p))
808 break; 834 break;
809 spin_lock_bh(&all_channels_lock);
810 err = -ENXIO; 835 err = -ENXIO;
811 chan = ppp_find_channel(unit); 836 pn = ppp_pernet(net);
837 spin_lock_bh(&pn->all_channels_lock);
838 chan = ppp_find_channel(pn, unit);
812 if (chan) { 839 if (chan) {
813 atomic_inc(&chan->file.refcnt); 840 atomic_inc(&chan->file.refcnt);
814 file->private_data = &chan->file; 841 file->private_data = &chan->file;
815 err = 0; 842 err = 0;
816 } 843 }
817 spin_unlock_bh(&all_channels_lock); 844 spin_unlock_bh(&pn->all_channels_lock);
818 break; 845 break;
819 846
820 default: 847 default:
@@ -834,6 +861,51 @@ static const struct file_operations ppp_device_fops = {
834 .release = ppp_release 861 .release = ppp_release
835}; 862};
836 863
864static __net_init int ppp_init_net(struct net *net)
865{
866 struct ppp_net *pn;
867 int err;
868
869 pn = kzalloc(sizeof(*pn), GFP_KERNEL);
870 if (!pn)
871 return -ENOMEM;
872
873 idr_init(&pn->units_idr);
874 mutex_init(&pn->all_ppp_mutex);
875
876 INIT_LIST_HEAD(&pn->all_channels);
877 INIT_LIST_HEAD(&pn->new_channels);
878
879 spin_lock_init(&pn->all_channels_lock);
880
881 err = net_assign_generic(net, ppp_net_id, pn);
882 if (err) {
883 kfree(pn);
884 return err;
885 }
886
887 return 0;
888}
889
890static __net_exit void ppp_exit_net(struct net *net)
891{
892 struct ppp_net *pn;
893
894 pn = net_generic(net, ppp_net_id);
895 idr_destroy(&pn->units_idr);
896 /*
897 * if someone has cached our net then
898 * further net_generic call will return NULL
899 */
900 net_assign_generic(net, ppp_net_id, NULL);
901 kfree(pn);
902}
903
904static __net_initdata struct pernet_operations ppp_net_ops = {
905 .init = ppp_init_net,
906 .exit = ppp_exit_net,
907};
908
837#define PPP_MAJOR 108 909#define PPP_MAJOR 108
838 910
839/* Called at boot time if ppp is compiled into the kernel, 911/* Called at boot time if ppp is compiled into the kernel,
@@ -843,25 +915,36 @@ static int __init ppp_init(void)
843 int err; 915 int err;
844 916
845 printk(KERN_INFO "PPP generic driver version " PPP_VERSION "\n"); 917 printk(KERN_INFO "PPP generic driver version " PPP_VERSION "\n");
846 err = register_chrdev(PPP_MAJOR, "ppp", &ppp_device_fops); 918
847 if (!err) { 919 err = register_pernet_gen_device(&ppp_net_id, &ppp_net_ops);
848 ppp_class = class_create(THIS_MODULE, "ppp"); 920 if (err) {
849 if (IS_ERR(ppp_class)) { 921 printk(KERN_ERR "failed to register PPP pernet device (%d)\n", err);
850 err = PTR_ERR(ppp_class); 922 goto out;
851 goto out_chrdev;
852 }
853 device_create(ppp_class, NULL, MKDEV(PPP_MAJOR, 0), NULL,
854 "ppp");
855 } 923 }
856 924
857out: 925 err = register_chrdev(PPP_MAJOR, "ppp", &ppp_device_fops);
858 if (err) 926 if (err) {
859 printk(KERN_ERR "failed to register PPP device (%d)\n", err); 927 printk(KERN_ERR "failed to register PPP device (%d)\n", err);
860 return err; 928 goto out_net;
929 }
930
931 ppp_class = class_create(THIS_MODULE, "ppp");
932 if (IS_ERR(ppp_class)) {
933 err = PTR_ERR(ppp_class);
934 goto out_chrdev;
935 }
936
937 /* not a big deal if we fail here :-) */
938 device_create(ppp_class, NULL, MKDEV(PPP_MAJOR, 0), NULL, "ppp");
939
940 return 0;
861 941
862out_chrdev: 942out_chrdev:
863 unregister_chrdev(PPP_MAJOR, "ppp"); 943 unregister_chrdev(PPP_MAJOR, "ppp");
864 goto out; 944out_net:
945 unregister_pernet_gen_device(ppp_net_id, &ppp_net_ops);
946out:
947 return err;
865} 948}
866 949
867/* 950/*
@@ -969,6 +1052,7 @@ static void ppp_setup(struct net_device *dev)
969 dev->tx_queue_len = 3; 1052 dev->tx_queue_len = 3;
970 dev->type = ARPHRD_PPP; 1053 dev->type = ARPHRD_PPP;
971 dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST; 1054 dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST;
1055 dev->features |= NETIF_F_NETNS_LOCAL;
972} 1056}
973 1057
974/* 1058/*
@@ -1986,19 +2070,27 @@ ppp_mp_reconstruct(struct ppp *ppp)
1986 * Channel interface. 2070 * Channel interface.
1987 */ 2071 */
1988 2072
1989/* 2073/* Create a new, unattached ppp channel. */
1990 * Create a new, unattached ppp channel. 2074int ppp_register_channel(struct ppp_channel *chan)
1991 */ 2075{
1992int 2076 return ppp_register_net_channel(current->nsproxy->net_ns, chan);
1993ppp_register_channel(struct ppp_channel *chan) 2077}
2078
2079/* Create a new, unattached ppp channel for specified net. */
2080int ppp_register_net_channel(struct net *net, struct ppp_channel *chan)
1994{ 2081{
1995 struct channel *pch; 2082 struct channel *pch;
2083 struct ppp_net *pn;
1996 2084
1997 pch = kzalloc(sizeof(struct channel), GFP_KERNEL); 2085 pch = kzalloc(sizeof(struct channel), GFP_KERNEL);
1998 if (!pch) 2086 if (!pch)
1999 return -ENOMEM; 2087 return -ENOMEM;
2088
2089 pn = ppp_pernet(net);
2090
2000 pch->ppp = NULL; 2091 pch->ppp = NULL;
2001 pch->chan = chan; 2092 pch->chan = chan;
2093 pch->chan_net = net;
2002 chan->ppp = pch; 2094 chan->ppp = pch;
2003 init_ppp_file(&pch->file, CHANNEL); 2095 init_ppp_file(&pch->file, CHANNEL);
2004 pch->file.hdrlen = chan->hdrlen; 2096 pch->file.hdrlen = chan->hdrlen;
@@ -2008,11 +2100,13 @@ ppp_register_channel(struct ppp_channel *chan)
2008 init_rwsem(&pch->chan_sem); 2100 init_rwsem(&pch->chan_sem);
2009 spin_lock_init(&pch->downl); 2101 spin_lock_init(&pch->downl);
2010 rwlock_init(&pch->upl); 2102 rwlock_init(&pch->upl);
2011 spin_lock_bh(&all_channels_lock); 2103
2012 pch->file.index = ++last_channel_index; 2104 spin_lock_bh(&pn->all_channels_lock);
2013 list_add(&pch->list, &new_channels); 2105 pch->file.index = ++pn->last_channel_index;
2106 list_add(&pch->list, &pn->new_channels);
2014 atomic_inc(&channel_count); 2107 atomic_inc(&channel_count);
2015 spin_unlock_bh(&all_channels_lock); 2108 spin_unlock_bh(&pn->all_channels_lock);
2109
2016 return 0; 2110 return 0;
2017} 2111}
2018 2112
@@ -2053,9 +2147,11 @@ void
2053ppp_unregister_channel(struct ppp_channel *chan) 2147ppp_unregister_channel(struct ppp_channel *chan)
2054{ 2148{
2055 struct channel *pch = chan->ppp; 2149 struct channel *pch = chan->ppp;
2150 struct ppp_net *pn;
2056 2151
2057 if (!pch) 2152 if (!pch)
2058 return; /* should never happen */ 2153 return; /* should never happen */
2154
2059 chan->ppp = NULL; 2155 chan->ppp = NULL;
2060 2156
2061 /* 2157 /*
@@ -2068,9 +2164,12 @@ ppp_unregister_channel(struct ppp_channel *chan)
2068 spin_unlock_bh(&pch->downl); 2164 spin_unlock_bh(&pch->downl);
2069 up_write(&pch->chan_sem); 2165 up_write(&pch->chan_sem);
2070 ppp_disconnect_channel(pch); 2166 ppp_disconnect_channel(pch);
2071 spin_lock_bh(&all_channels_lock); 2167
2168 pn = ppp_pernet(pch->chan_net);
2169 spin_lock_bh(&pn->all_channels_lock);
2072 list_del(&pch->list); 2170 list_del(&pch->list);
2073 spin_unlock_bh(&all_channels_lock); 2171 spin_unlock_bh(&pn->all_channels_lock);
2172
2074 pch->file.dead = 1; 2173 pch->file.dead = 1;
2075 wake_up_interruptible(&pch->file.rwait); 2174 wake_up_interruptible(&pch->file.rwait);
2076 if (atomic_dec_and_test(&pch->file.refcnt)) 2175 if (atomic_dec_and_test(&pch->file.refcnt))
@@ -2395,9 +2494,10 @@ ppp_get_stats(struct ppp *ppp, struct ppp_stats *st)
2395 * unit == -1 means allocate a new number. 2494 * unit == -1 means allocate a new number.
2396 */ 2495 */
2397static struct ppp * 2496static struct ppp *
2398ppp_create_interface(int unit, int *retp) 2497ppp_create_interface(struct net *net, int unit, int *retp)
2399{ 2498{
2400 struct ppp *ppp; 2499 struct ppp *ppp;
2500 struct ppp_net *pn;
2401 struct net_device *dev = NULL; 2501 struct net_device *dev = NULL;
2402 int ret = -ENOMEM; 2502 int ret = -ENOMEM;
2403 int i; 2503 int i;
@@ -2406,6 +2506,8 @@ ppp_create_interface(int unit, int *retp)
2406 if (!dev) 2506 if (!dev)
2407 goto out1; 2507 goto out1;
2408 2508
2509 pn = ppp_pernet(net);
2510
2409 ppp = netdev_priv(dev); 2511 ppp = netdev_priv(dev);
2410 ppp->dev = dev; 2512 ppp->dev = dev;
2411 ppp->mru = PPP_MRU; 2513 ppp->mru = PPP_MRU;
@@ -2421,17 +2523,23 @@ ppp_create_interface(int unit, int *retp)
2421 skb_queue_head_init(&ppp->mrq); 2523 skb_queue_head_init(&ppp->mrq);
2422#endif /* CONFIG_PPP_MULTILINK */ 2524#endif /* CONFIG_PPP_MULTILINK */
2423 2525
2526 /*
2527 * drum roll: don't forget to set
2528 * the net device is belong to
2529 */
2530 dev_net_set(dev, net);
2531
2424 ret = -EEXIST; 2532 ret = -EEXIST;
2425 mutex_lock(&all_ppp_mutex); 2533 mutex_lock(&pn->all_ppp_mutex);
2426 2534
2427 if (unit < 0) { 2535 if (unit < 0) {
2428 unit = unit_get(&ppp_units_idr, ppp); 2536 unit = unit_get(&pn->units_idr, ppp);
2429 if (unit < 0) { 2537 if (unit < 0) {
2430 *retp = unit; 2538 *retp = unit;
2431 goto out2; 2539 goto out2;
2432 } 2540 }
2433 } else { 2541 } else {
2434 if (unit_find(&ppp_units_idr, unit)) 2542 if (unit_find(&pn->units_idr, unit))
2435 goto out2; /* unit already exists */ 2543 goto out2; /* unit already exists */
2436 /* 2544 /*
2437 * if caller need a specified unit number 2545 * if caller need a specified unit number
@@ -2442,7 +2550,7 @@ ppp_create_interface(int unit, int *retp)
2442 * fair but at least pppd will ask us to allocate 2550 * fair but at least pppd will ask us to allocate
2443 * new unit in this case so user is happy :) 2551 * new unit in this case so user is happy :)
2444 */ 2552 */
2445 unit = unit_set(&ppp_units_idr, ppp, unit); 2553 unit = unit_set(&pn->units_idr, ppp, unit);
2446 if (unit < 0) 2554 if (unit < 0)
2447 goto out2; 2555 goto out2;
2448 } 2556 }
@@ -2453,20 +2561,22 @@ ppp_create_interface(int unit, int *retp)
2453 2561
2454 ret = register_netdev(dev); 2562 ret = register_netdev(dev);
2455 if (ret != 0) { 2563 if (ret != 0) {
2456 unit_put(&ppp_units_idr, unit); 2564 unit_put(&pn->units_idr, unit);
2457 printk(KERN_ERR "PPP: couldn't register device %s (%d)\n", 2565 printk(KERN_ERR "PPP: couldn't register device %s (%d)\n",
2458 dev->name, ret); 2566 dev->name, ret);
2459 goto out2; 2567 goto out2;
2460 } 2568 }
2461 2569
2570 ppp->ppp_net = net;
2571
2462 atomic_inc(&ppp_unit_count); 2572 atomic_inc(&ppp_unit_count);
2463 mutex_unlock(&all_ppp_mutex); 2573 mutex_unlock(&pn->all_ppp_mutex);
2464 2574
2465 *retp = 0; 2575 *retp = 0;
2466 return ppp; 2576 return ppp;
2467 2577
2468out2: 2578out2:
2469 mutex_unlock(&all_ppp_mutex); 2579 mutex_unlock(&pn->all_ppp_mutex);
2470 free_netdev(dev); 2580 free_netdev(dev);
2471out1: 2581out1:
2472 *retp = ret; 2582 *retp = ret;
@@ -2492,7 +2602,11 @@ init_ppp_file(struct ppp_file *pf, int kind)
2492 */ 2602 */
2493static void ppp_shutdown_interface(struct ppp *ppp) 2603static void ppp_shutdown_interface(struct ppp *ppp)
2494{ 2604{
2495 mutex_lock(&all_ppp_mutex); 2605 struct ppp_net *pn;
2606
2607 pn = ppp_pernet(ppp->ppp_net);
2608 mutex_lock(&pn->all_ppp_mutex);
2609
2496 /* This will call dev_close() for us. */ 2610 /* This will call dev_close() for us. */
2497 ppp_lock(ppp); 2611 ppp_lock(ppp);
2498 if (!ppp->closing) { 2612 if (!ppp->closing) {
@@ -2502,11 +2616,12 @@ static void ppp_shutdown_interface(struct ppp *ppp)
2502 } else 2616 } else
2503 ppp_unlock(ppp); 2617 ppp_unlock(ppp);
2504 2618
2505 unit_put(&ppp_units_idr, ppp->file.index); 2619 unit_put(&pn->units_idr, ppp->file.index);
2506 ppp->file.dead = 1; 2620 ppp->file.dead = 1;
2507 ppp->owner = NULL; 2621 ppp->owner = NULL;
2508 wake_up_interruptible(&ppp->file.rwait); 2622 wake_up_interruptible(&ppp->file.rwait);
2509 mutex_unlock(&all_ppp_mutex); 2623
2624 mutex_unlock(&pn->all_ppp_mutex);
2510} 2625}
2511 2626
2512/* 2627/*
@@ -2554,9 +2669,9 @@ static void ppp_destroy_interface(struct ppp *ppp)
2554 * The caller should have locked the all_ppp_mutex. 2669 * The caller should have locked the all_ppp_mutex.
2555 */ 2670 */
2556static struct ppp * 2671static struct ppp *
2557ppp_find_unit(int unit) 2672ppp_find_unit(struct ppp_net *pn, int unit)
2558{ 2673{
2559 return unit_find(&ppp_units_idr, unit); 2674 return unit_find(&pn->units_idr, unit);
2560} 2675}
2561 2676
2562/* 2677/*
@@ -2568,20 +2683,22 @@ ppp_find_unit(int unit)
2568 * when we have a lot of channels in use. 2683 * when we have a lot of channels in use.
2569 */ 2684 */
2570static struct channel * 2685static struct channel *
2571ppp_find_channel(int unit) 2686ppp_find_channel(struct ppp_net *pn, int unit)
2572{ 2687{
2573 struct channel *pch; 2688 struct channel *pch;
2574 2689
2575 list_for_each_entry(pch, &new_channels, list) { 2690 list_for_each_entry(pch, &pn->new_channels, list) {
2576 if (pch->file.index == unit) { 2691 if (pch->file.index == unit) {
2577 list_move(&pch->list, &all_channels); 2692 list_move(&pch->list, &pn->all_channels);
2578 return pch; 2693 return pch;
2579 } 2694 }
2580 } 2695 }
2581 list_for_each_entry(pch, &all_channels, list) { 2696
2697 list_for_each_entry(pch, &pn->all_channels, list) {
2582 if (pch->file.index == unit) 2698 if (pch->file.index == unit)
2583 return pch; 2699 return pch;
2584 } 2700 }
2701
2585 return NULL; 2702 return NULL;
2586} 2703}
2587 2704
@@ -2592,11 +2709,14 @@ static int
2592ppp_connect_channel(struct channel *pch, int unit) 2709ppp_connect_channel(struct channel *pch, int unit)
2593{ 2710{
2594 struct ppp *ppp; 2711 struct ppp *ppp;
2712 struct ppp_net *pn;
2595 int ret = -ENXIO; 2713 int ret = -ENXIO;
2596 int hdrlen; 2714 int hdrlen;
2597 2715
2598 mutex_lock(&all_ppp_mutex); 2716 pn = ppp_pernet(pch->chan_net);
2599 ppp = ppp_find_unit(unit); 2717
2718 mutex_lock(&pn->all_ppp_mutex);
2719 ppp = ppp_find_unit(pn, unit);
2600 if (!ppp) 2720 if (!ppp)
2601 goto out; 2721 goto out;
2602 write_lock_bh(&pch->upl); 2722 write_lock_bh(&pch->upl);
@@ -2620,7 +2740,7 @@ ppp_connect_channel(struct channel *pch, int unit)
2620 outl: 2740 outl:
2621 write_unlock_bh(&pch->upl); 2741 write_unlock_bh(&pch->upl);
2622 out: 2742 out:
2623 mutex_unlock(&all_ppp_mutex); 2743 mutex_unlock(&pn->all_ppp_mutex);
2624 return ret; 2744 return ret;
2625} 2745}
2626 2746
@@ -2677,7 +2797,7 @@ static void __exit ppp_cleanup(void)
2677 unregister_chrdev(PPP_MAJOR, "ppp"); 2797 unregister_chrdev(PPP_MAJOR, "ppp");
2678 device_destroy(ppp_class, MKDEV(PPP_MAJOR, 0)); 2798 device_destroy(ppp_class, MKDEV(PPP_MAJOR, 0));
2679 class_destroy(ppp_class); 2799 class_destroy(ppp_class);
2680 idr_destroy(&ppp_units_idr); 2800 unregister_pernet_gen_device(ppp_net_id, &ppp_net_ops);
2681} 2801}
2682 2802
2683/* 2803/*
@@ -2743,6 +2863,7 @@ static void *unit_find(struct idr *p, int n)
2743module_init(ppp_init); 2863module_init(ppp_init);
2744module_exit(ppp_cleanup); 2864module_exit(ppp_cleanup);
2745 2865
2866EXPORT_SYMBOL(ppp_register_net_channel);
2746EXPORT_SYMBOL(ppp_register_channel); 2867EXPORT_SYMBOL(ppp_register_channel);
2747EXPORT_SYMBOL(ppp_unregister_channel); 2868EXPORT_SYMBOL(ppp_unregister_channel);
2748EXPORT_SYMBOL(ppp_channel_index); 2869EXPORT_SYMBOL(ppp_channel_index);
diff --git a/drivers/net/pppoe.c b/drivers/net/pppoe.c
index c22b30533a14..798b8cf5f9a6 100644
--- a/drivers/net/pppoe.c
+++ b/drivers/net/pppoe.c
@@ -78,38 +78,73 @@
78#include <linux/proc_fs.h> 78#include <linux/proc_fs.h>
79#include <linux/seq_file.h> 79#include <linux/seq_file.h>
80 80
81#include <linux/nsproxy.h>
81#include <net/net_namespace.h> 82#include <net/net_namespace.h>
83#include <net/netns/generic.h>
82#include <net/sock.h> 84#include <net/sock.h>
83 85
84#include <asm/uaccess.h> 86#include <asm/uaccess.h>
85 87
86#define PPPOE_HASH_BITS 4 88#define PPPOE_HASH_BITS 4
87#define PPPOE_HASH_SIZE (1<<PPPOE_HASH_BITS) 89#define PPPOE_HASH_SIZE (1 << PPPOE_HASH_BITS)
88 90#define PPPOE_HASH_MASK (PPPOE_HASH_SIZE - 1)
89static struct ppp_channel_ops pppoe_chan_ops;
90 91
91static int pppoe_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg); 92static int pppoe_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg);
92static int pppoe_xmit(struct ppp_channel *chan, struct sk_buff *skb); 93static int pppoe_xmit(struct ppp_channel *chan, struct sk_buff *skb);
93static int __pppoe_xmit(struct sock *sk, struct sk_buff *skb); 94static int __pppoe_xmit(struct sock *sk, struct sk_buff *skb);
94 95
95static const struct proto_ops pppoe_ops; 96static const struct proto_ops pppoe_ops;
96static DEFINE_RWLOCK(pppoe_hash_lock);
97
98static struct ppp_channel_ops pppoe_chan_ops; 97static struct ppp_channel_ops pppoe_chan_ops;
99 98
99/* per-net private data for this module */
100static unsigned int pppoe_net_id;
101struct pppoe_net {
102 /*
103 * we could use _single_ hash table for all
104 * nets by injecting net id into the hash but
105 * it would increase hash chains and add
106 * a few additional math comparations messy
107 * as well, moreover in case of SMP less locking
108 * controversy here
109 */
110 struct pppox_sock *hash_table[PPPOE_HASH_SIZE];
111 rwlock_t hash_lock;
112};
113
114/* to eliminate a race btw pppoe_flush_dev and pppoe_release */
115static DEFINE_SPINLOCK(flush_lock);
116
117/*
118 * PPPoE could be in the following stages:
119 * 1) Discovery stage (to obtain remote MAC and Session ID)
120 * 2) Session stage (MAC and SID are known)
121 *
122 * Ethernet frames have a special tag for this but
123 * we use simplier approach based on session id
124 */
125static inline bool stage_session(__be16 sid)
126{
127 return sid != 0;
128}
129
130static inline struct pppoe_net *pppoe_pernet(struct net *net)
131{
132 BUG_ON(!net);
133
134 return net_generic(net, pppoe_net_id);
135}
136
100static inline int cmp_2_addr(struct pppoe_addr *a, struct pppoe_addr *b) 137static inline int cmp_2_addr(struct pppoe_addr *a, struct pppoe_addr *b)
101{ 138{
102 return (a->sid == b->sid && 139 return a->sid == b->sid && !memcmp(a->remote, b->remote, ETH_ALEN);
103 (memcmp(a->remote, b->remote, ETH_ALEN) == 0));
104} 140}
105 141
106static inline int cmp_addr(struct pppoe_addr *a, __be16 sid, char *addr) 142static inline int cmp_addr(struct pppoe_addr *a, __be16 sid, char *addr)
107{ 143{
108 return (a->sid == sid && 144 return a->sid == sid && !memcmp(a->remote, addr, ETH_ALEN);
109 (memcmp(a->remote,addr,ETH_ALEN) == 0));
110} 145}
111 146
112#if 8%PPPOE_HASH_BITS 147#if 8 % PPPOE_HASH_BITS
113#error 8 must be a multiple of PPPOE_HASH_BITS 148#error 8 must be a multiple of PPPOE_HASH_BITS
114#endif 149#endif
115 150
@@ -118,69 +153,71 @@ static int hash_item(__be16 sid, unsigned char *addr)
118 unsigned char hash = 0; 153 unsigned char hash = 0;
119 unsigned int i; 154 unsigned int i;
120 155
121 for (i = 0 ; i < ETH_ALEN ; i++) { 156 for (i = 0; i < ETH_ALEN; i++)
122 hash ^= addr[i]; 157 hash ^= addr[i];
123 } 158 for (i = 0; i < sizeof(sid_t) * 8; i += 8)
124 for (i = 0 ; i < sizeof(sid_t)*8 ; i += 8 ){ 159 hash ^= (__force __u32)sid >> i;
125 hash ^= (__force __u32)sid>>i; 160 for (i = 8; (i >>= 1) >= PPPOE_HASH_BITS;)
126 } 161 hash ^= hash >> i;
127 for (i = 8 ; (i>>=1) >= PPPOE_HASH_BITS ; ) {
128 hash ^= hash>>i;
129 }
130 162
131 return hash & ( PPPOE_HASH_SIZE - 1 ); 163 return hash & PPPOE_HASH_MASK;
132} 164}
133 165
134/* zeroed because its in .bss */
135static struct pppox_sock *item_hash_table[PPPOE_HASH_SIZE];
136
137/********************************************************************** 166/**********************************************************************
138 * 167 *
139 * Set/get/delete/rehash items (internal versions) 168 * Set/get/delete/rehash items (internal versions)
140 * 169 *
141 **********************************************************************/ 170 **********************************************************************/
142static struct pppox_sock *__get_item(__be16 sid, unsigned char *addr, int ifindex) 171static struct pppox_sock *__get_item(struct pppoe_net *pn, __be16 sid,
172 unsigned char *addr, int ifindex)
143{ 173{
144 int hash = hash_item(sid, addr); 174 int hash = hash_item(sid, addr);
145 struct pppox_sock *ret; 175 struct pppox_sock *ret;
146 176
147 ret = item_hash_table[hash]; 177 ret = pn->hash_table[hash];
178 while (ret) {
179 if (cmp_addr(&ret->pppoe_pa, sid, addr) &&
180 ret->pppoe_ifindex == ifindex)
181 return ret;
148 182
149 while (ret && !(cmp_addr(&ret->pppoe_pa, sid, addr) && ret->pppoe_ifindex == ifindex))
150 ret = ret->next; 183 ret = ret->next;
184 }
151 185
152 return ret; 186 return NULL;
153} 187}
154 188
155static int __set_item(struct pppox_sock *po) 189static int __set_item(struct pppoe_net *pn, struct pppox_sock *po)
156{ 190{
157 int hash = hash_item(po->pppoe_pa.sid, po->pppoe_pa.remote); 191 int hash = hash_item(po->pppoe_pa.sid, po->pppoe_pa.remote);
158 struct pppox_sock *ret; 192 struct pppox_sock *ret;
159 193
160 ret = item_hash_table[hash]; 194 ret = pn->hash_table[hash];
161 while (ret) { 195 while (ret) {
162 if (cmp_2_addr(&ret->pppoe_pa, &po->pppoe_pa) && ret->pppoe_ifindex == po->pppoe_ifindex) 196 if (cmp_2_addr(&ret->pppoe_pa, &po->pppoe_pa) &&
197 ret->pppoe_ifindex == po->pppoe_ifindex)
163 return -EALREADY; 198 return -EALREADY;
164 199
165 ret = ret->next; 200 ret = ret->next;
166 } 201 }
167 202
168 po->next = item_hash_table[hash]; 203 po->next = pn->hash_table[hash];
169 item_hash_table[hash] = po; 204 pn->hash_table[hash] = po;
170 205
171 return 0; 206 return 0;
172} 207}
173 208
174static struct pppox_sock *__delete_item(__be16 sid, char *addr, int ifindex) 209static struct pppox_sock *__delete_item(struct pppoe_net *pn, __be16 sid,
210 char *addr, int ifindex)
175{ 211{
176 int hash = hash_item(sid, addr); 212 int hash = hash_item(sid, addr);
177 struct pppox_sock *ret, **src; 213 struct pppox_sock *ret, **src;
178 214
179 ret = item_hash_table[hash]; 215 ret = pn->hash_table[hash];
180 src = &item_hash_table[hash]; 216 src = &pn->hash_table[hash];
181 217
182 while (ret) { 218 while (ret) {
183 if (cmp_addr(&ret->pppoe_pa, sid, addr) && ret->pppoe_ifindex == ifindex) { 219 if (cmp_addr(&ret->pppoe_pa, sid, addr) &&
220 ret->pppoe_ifindex == ifindex) {
184 *src = ret->next; 221 *src = ret->next;
185 break; 222 break;
186 } 223 }
@@ -197,46 +234,54 @@ static struct pppox_sock *__delete_item(__be16 sid, char *addr, int ifindex)
197 * Set/get/delete/rehash items 234 * Set/get/delete/rehash items
198 * 235 *
199 **********************************************************************/ 236 **********************************************************************/
200static inline struct pppox_sock *get_item(__be16 sid, 237static inline struct pppox_sock *get_item(struct pppoe_net *pn, __be16 sid,
201 unsigned char *addr, int ifindex) 238 unsigned char *addr, int ifindex)
202{ 239{
203 struct pppox_sock *po; 240 struct pppox_sock *po;
204 241
205 read_lock_bh(&pppoe_hash_lock); 242 read_lock_bh(&pn->hash_lock);
206 po = __get_item(sid, addr, ifindex); 243 po = __get_item(pn, sid, addr, ifindex);
207 if (po) 244 if (po)
208 sock_hold(sk_pppox(po)); 245 sock_hold(sk_pppox(po));
209 read_unlock_bh(&pppoe_hash_lock); 246 read_unlock_bh(&pn->hash_lock);
210 247
211 return po; 248 return po;
212} 249}
213 250
214static inline struct pppox_sock *get_item_by_addr(struct sockaddr_pppox *sp) 251static inline struct pppox_sock *get_item_by_addr(struct net *net,
252 struct sockaddr_pppox *sp)
215{ 253{
216 struct net_device *dev; 254 struct net_device *dev;
255 struct pppoe_net *pn;
256 struct pppox_sock *pppox_sock;
257
217 int ifindex; 258 int ifindex;
218 259
219 dev = dev_get_by_name(&init_net, sp->sa_addr.pppoe.dev); 260 dev = dev_get_by_name(net, sp->sa_addr.pppoe.dev);
220 if(!dev) 261 if (!dev)
221 return NULL; 262 return NULL;
263
222 ifindex = dev->ifindex; 264 ifindex = dev->ifindex;
265 pn = net_generic(net, pppoe_net_id);
266 pppox_sock = get_item(pn, sp->sa_addr.pppoe.sid,
267 sp->sa_addr.pppoe.remote, ifindex);
223 dev_put(dev); 268 dev_put(dev);
224 return get_item(sp->sa_addr.pppoe.sid, sp->sa_addr.pppoe.remote, ifindex); 269
270 return pppox_sock;
225} 271}
226 272
227static inline struct pppox_sock *delete_item(__be16 sid, char *addr, int ifindex) 273static inline struct pppox_sock *delete_item(struct pppoe_net *pn, __be16 sid,
274 char *addr, int ifindex)
228{ 275{
229 struct pppox_sock *ret; 276 struct pppox_sock *ret;
230 277
231 write_lock_bh(&pppoe_hash_lock); 278 write_lock_bh(&pn->hash_lock);
232 ret = __delete_item(sid, addr, ifindex); 279 ret = __delete_item(pn, sid, addr, ifindex);
233 write_unlock_bh(&pppoe_hash_lock); 280 write_unlock_bh(&pn->hash_lock);
234 281
235 return ret; 282 return ret;
236} 283}
237 284
238
239
240/*************************************************************************** 285/***************************************************************************
241 * 286 *
242 * Handler for device events. 287 * Handler for device events.
@@ -246,25 +291,33 @@ static inline struct pppox_sock *delete_item(__be16 sid, char *addr, int ifindex
246 291
247static void pppoe_flush_dev(struct net_device *dev) 292static void pppoe_flush_dev(struct net_device *dev)
248{ 293{
249 int hash; 294 struct pppoe_net *pn;
295 int i;
296
250 BUG_ON(dev == NULL); 297 BUG_ON(dev == NULL);
251 298
252 write_lock_bh(&pppoe_hash_lock); 299 pn = pppoe_pernet(dev_net(dev));
253 for (hash = 0; hash < PPPOE_HASH_SIZE; hash++) { 300 if (!pn) /* already freed */
254 struct pppox_sock *po = item_hash_table[hash]; 301 return;
302
303 write_lock_bh(&pn->hash_lock);
304 for (i = 0; i < PPPOE_HASH_SIZE; i++) {
305 struct pppox_sock *po = pn->hash_table[i];
255 306
256 while (po != NULL) { 307 while (po != NULL) {
257 struct sock *sk = sk_pppox(po); 308 struct sock *sk;
258 if (po->pppoe_dev != dev) { 309 if (po->pppoe_dev != dev) {
259 po = po->next; 310 po = po->next;
260 continue; 311 continue;
261 } 312 }
313 sk = sk_pppox(po);
314 spin_lock(&flush_lock);
262 po->pppoe_dev = NULL; 315 po->pppoe_dev = NULL;
316 spin_unlock(&flush_lock);
263 dev_put(dev); 317 dev_put(dev);
264 318
265
266 /* We always grab the socket lock, followed by the 319 /* We always grab the socket lock, followed by the
267 * pppoe_hash_lock, in that order. Since we should 320 * hash_lock, in that order. Since we should
268 * hold the sock lock while doing any unbinding, 321 * hold the sock lock while doing any unbinding,
269 * we need to release the lock we're holding. 322 * we need to release the lock we're holding.
270 * Hold a reference to the sock so it doesn't disappear 323 * Hold a reference to the sock so it doesn't disappear
@@ -273,7 +326,7 @@ static void pppoe_flush_dev(struct net_device *dev)
273 326
274 sock_hold(sk); 327 sock_hold(sk);
275 328
276 write_unlock_bh(&pppoe_hash_lock); 329 write_unlock_bh(&pn->hash_lock);
277 lock_sock(sk); 330 lock_sock(sk);
278 331
279 if (sk->sk_state & (PPPOX_CONNECTED | PPPOX_BOUND)) { 332 if (sk->sk_state & (PPPOX_CONNECTED | PPPOX_BOUND)) {
@@ -289,20 +342,17 @@ static void pppoe_flush_dev(struct net_device *dev)
289 * While the lock was dropped the chain contents may 342 * While the lock was dropped the chain contents may
290 * have changed. 343 * have changed.
291 */ 344 */
292 write_lock_bh(&pppoe_hash_lock); 345 write_lock_bh(&pn->hash_lock);
293 po = item_hash_table[hash]; 346 po = pn->hash_table[i];
294 } 347 }
295 } 348 }
296 write_unlock_bh(&pppoe_hash_lock); 349 write_unlock_bh(&pn->hash_lock);
297} 350}
298 351
299static int pppoe_device_event(struct notifier_block *this, 352static int pppoe_device_event(struct notifier_block *this,
300 unsigned long event, void *ptr) 353 unsigned long event, void *ptr)
301{ 354{
302 struct net_device *dev = (struct net_device *) ptr; 355 struct net_device *dev = (struct net_device *)ptr;
303
304 if (dev_net(dev) != &init_net)
305 return NOTIFY_DONE;
306 356
307 /* Only look at sockets that are using this specific device. */ 357 /* Only look at sockets that are using this specific device. */
308 switch (event) { 358 switch (event) {
@@ -324,12 +374,10 @@ static int pppoe_device_event(struct notifier_block *this,
324 return NOTIFY_DONE; 374 return NOTIFY_DONE;
325} 375}
326 376
327
328static struct notifier_block pppoe_notifier = { 377static struct notifier_block pppoe_notifier = {
329 .notifier_call = pppoe_device_event, 378 .notifier_call = pppoe_device_event,
330}; 379};
331 380
332
333/************************************************************************ 381/************************************************************************
334 * 382 *
335 * Do the real work of receiving a PPPoE Session frame. 383 * Do the real work of receiving a PPPoE Session frame.
@@ -343,8 +391,8 @@ static int pppoe_rcv_core(struct sock *sk, struct sk_buff *skb)
343 if (sk->sk_state & PPPOX_BOUND) { 391 if (sk->sk_state & PPPOX_BOUND) {
344 ppp_input(&po->chan, skb); 392 ppp_input(&po->chan, skb);
345 } else if (sk->sk_state & PPPOX_RELAY) { 393 } else if (sk->sk_state & PPPOX_RELAY) {
346 relay_po = get_item_by_addr(&po->pppoe_relay); 394 relay_po = get_item_by_addr(dev_net(po->pppoe_dev),
347 395 &po->pppoe_relay);
348 if (relay_po == NULL) 396 if (relay_po == NULL)
349 goto abort_kfree; 397 goto abort_kfree;
350 398
@@ -373,22 +421,18 @@ abort_kfree:
373 * Receive wrapper called in BH context. 421 * Receive wrapper called in BH context.
374 * 422 *
375 ***********************************************************************/ 423 ***********************************************************************/
376static int pppoe_rcv(struct sk_buff *skb, 424static int pppoe_rcv(struct sk_buff *skb, struct net_device *dev,
377 struct net_device *dev, 425 struct packet_type *pt, struct net_device *orig_dev)
378 struct packet_type *pt,
379 struct net_device *orig_dev)
380
381{ 426{
382 struct pppoe_hdr *ph; 427 struct pppoe_hdr *ph;
383 struct pppox_sock *po; 428 struct pppox_sock *po;
429 struct pppoe_net *pn;
384 int len; 430 int len;
385 431
386 if (!(skb = skb_share_check(skb, GFP_ATOMIC))) 432 skb = skb_share_check(skb, GFP_ATOMIC);
433 if (!skb)
387 goto out; 434 goto out;
388 435
389 if (dev_net(dev) != &init_net)
390 goto drop;
391
392 if (!pskb_may_pull(skb, sizeof(struct pppoe_hdr))) 436 if (!pskb_may_pull(skb, sizeof(struct pppoe_hdr)))
393 goto drop; 437 goto drop;
394 438
@@ -402,7 +446,8 @@ static int pppoe_rcv(struct sk_buff *skb,
402 if (pskb_trim_rcsum(skb, len)) 446 if (pskb_trim_rcsum(skb, len))
403 goto drop; 447 goto drop;
404 448
405 po = get_item(ph->sid, eth_hdr(skb)->h_source, dev->ifindex); 449 pn = pppoe_pernet(dev_net(dev));
450 po = get_item(pn, ph->sid, eth_hdr(skb)->h_source, dev->ifindex);
406 if (!po) 451 if (!po)
407 goto drop; 452 goto drop;
408 453
@@ -420,19 +465,16 @@ out:
420 * This is solely for detection of PADT frames 465 * This is solely for detection of PADT frames
421 * 466 *
422 ***********************************************************************/ 467 ***********************************************************************/
423static int pppoe_disc_rcv(struct sk_buff *skb, 468static int pppoe_disc_rcv(struct sk_buff *skb, struct net_device *dev,
424 struct net_device *dev, 469 struct packet_type *pt, struct net_device *orig_dev)
425 struct packet_type *pt,
426 struct net_device *orig_dev)
427 470
428{ 471{
429 struct pppoe_hdr *ph; 472 struct pppoe_hdr *ph;
430 struct pppox_sock *po; 473 struct pppox_sock *po;
474 struct pppoe_net *pn;
431 475
432 if (dev_net(dev) != &init_net) 476 skb = skb_share_check(skb, GFP_ATOMIC);
433 goto abort; 477 if (!skb)
434
435 if (!(skb = skb_share_check(skb, GFP_ATOMIC)))
436 goto out; 478 goto out;
437 479
438 if (!pskb_may_pull(skb, sizeof(struct pppoe_hdr))) 480 if (!pskb_may_pull(skb, sizeof(struct pppoe_hdr)))
@@ -442,7 +484,8 @@ static int pppoe_disc_rcv(struct sk_buff *skb,
442 if (ph->code != PADT_CODE) 484 if (ph->code != PADT_CODE)
443 goto abort; 485 goto abort;
444 486
445 po = get_item(ph->sid, eth_hdr(skb)->h_source, dev->ifindex); 487 pn = pppoe_pernet(dev_net(dev));
488 po = get_item(pn, ph->sid, eth_hdr(skb)->h_source, dev->ifindex);
446 if (po) { 489 if (po) {
447 struct sock *sk = sk_pppox(po); 490 struct sock *sk = sk_pppox(po);
448 491
@@ -493,38 +536,37 @@ static struct proto pppoe_sk_proto = {
493 **********************************************************************/ 536 **********************************************************************/
494static int pppoe_create(struct net *net, struct socket *sock) 537static int pppoe_create(struct net *net, struct socket *sock)
495{ 538{
496 int error = -ENOMEM;
497 struct sock *sk; 539 struct sock *sk;
498 540
499 sk = sk_alloc(net, PF_PPPOX, GFP_KERNEL, &pppoe_sk_proto); 541 sk = sk_alloc(net, PF_PPPOX, GFP_KERNEL, &pppoe_sk_proto);
500 if (!sk) 542 if (!sk)
501 goto out; 543 return -ENOMEM;
502 544
503 sock_init_data(sock, sk); 545 sock_init_data(sock, sk);
504 546
505 sock->state = SS_UNCONNECTED; 547 sock->state = SS_UNCONNECTED;
506 sock->ops = &pppoe_ops; 548 sock->ops = &pppoe_ops;
507 549
508 sk->sk_backlog_rcv = pppoe_rcv_core; 550 sk->sk_backlog_rcv = pppoe_rcv_core;
509 sk->sk_state = PPPOX_NONE; 551 sk->sk_state = PPPOX_NONE;
510 sk->sk_type = SOCK_STREAM; 552 sk->sk_type = SOCK_STREAM;
511 sk->sk_family = PF_PPPOX; 553 sk->sk_family = PF_PPPOX;
512 sk->sk_protocol = PX_PROTO_OE; 554 sk->sk_protocol = PX_PROTO_OE;
513 555
514 error = 0; 556 return 0;
515out: return error;
516} 557}
517 558
518static int pppoe_release(struct socket *sock) 559static int pppoe_release(struct socket *sock)
519{ 560{
520 struct sock *sk = sock->sk; 561 struct sock *sk = sock->sk;
521 struct pppox_sock *po; 562 struct pppox_sock *po;
563 struct pppoe_net *pn;
522 564
523 if (!sk) 565 if (!sk)
524 return 0; 566 return 0;
525 567
526 lock_sock(sk); 568 lock_sock(sk);
527 if (sock_flag(sk, SOCK_DEAD)){ 569 if (sock_flag(sk, SOCK_DEAD)) {
528 release_sock(sk); 570 release_sock(sk);
529 return -EBADF; 571 return -EBADF;
530 } 572 }
@@ -534,26 +576,39 @@ static int pppoe_release(struct socket *sock)
534 /* Signal the death of the socket. */ 576 /* Signal the death of the socket. */
535 sk->sk_state = PPPOX_DEAD; 577 sk->sk_state = PPPOX_DEAD;
536 578
579 /*
580 * pppoe_flush_dev could lead to a race with
581 * this routine so we use flush_lock to eliminate
582 * such a case (we only need per-net specific data)
583 */
584 spin_lock(&flush_lock);
585 po = pppox_sk(sk);
586 if (!po->pppoe_dev) {
587 spin_unlock(&flush_lock);
588 goto out;
589 }
590 pn = pppoe_pernet(dev_net(po->pppoe_dev));
591 spin_unlock(&flush_lock);
537 592
538 /* Write lock on hash lock protects the entire "po" struct from 593 /*
539 * concurrent updates via pppoe_flush_dev. The "po" struct should 594 * protect "po" from concurrent updates
540 * be considered part of the hash table contents, thus protected 595 * on pppoe_flush_dev
541 * by the hash table lock */ 596 */
542 write_lock_bh(&pppoe_hash_lock); 597 write_lock_bh(&pn->hash_lock);
543 598
544 po = pppox_sk(sk); 599 po = pppox_sk(sk);
545 if (po->pppoe_pa.sid) { 600 if (stage_session(po->pppoe_pa.sid))
546 __delete_item(po->pppoe_pa.sid, 601 __delete_item(pn, po->pppoe_pa.sid, po->pppoe_pa.remote,
547 po->pppoe_pa.remote, po->pppoe_ifindex); 602 po->pppoe_ifindex);
548 }
549 603
550 if (po->pppoe_dev) { 604 if (po->pppoe_dev) {
551 dev_put(po->pppoe_dev); 605 dev_put(po->pppoe_dev);
552 po->pppoe_dev = NULL; 606 po->pppoe_dev = NULL;
553 } 607 }
554 608
555 write_unlock_bh(&pppoe_hash_lock); 609 write_unlock_bh(&pn->hash_lock);
556 610
611out:
557 sock_orphan(sk); 612 sock_orphan(sk);
558 sock->sk = NULL; 613 sock->sk = NULL;
559 614
@@ -564,14 +619,14 @@ static int pppoe_release(struct socket *sock)
564 return 0; 619 return 0;
565} 620}
566 621
567
568static int pppoe_connect(struct socket *sock, struct sockaddr *uservaddr, 622static int pppoe_connect(struct socket *sock, struct sockaddr *uservaddr,
569 int sockaddr_len, int flags) 623 int sockaddr_len, int flags)
570{ 624{
571 struct sock *sk = sock->sk; 625 struct sock *sk = sock->sk;
572 struct net_device *dev; 626 struct sockaddr_pppox *sp = (struct sockaddr_pppox *)uservaddr;
573 struct sockaddr_pppox *sp = (struct sockaddr_pppox *) uservaddr;
574 struct pppox_sock *po = pppox_sk(sk); 627 struct pppox_sock *po = pppox_sk(sk);
628 struct net_device *dev;
629 struct pppoe_net *pn;
575 int error; 630 int error;
576 631
577 lock_sock(sk); 632 lock_sock(sk);
@@ -582,44 +637,45 @@ static int pppoe_connect(struct socket *sock, struct sockaddr *uservaddr,
582 637
583 /* Check for already bound sockets */ 638 /* Check for already bound sockets */
584 error = -EBUSY; 639 error = -EBUSY;
585 if ((sk->sk_state & PPPOX_CONNECTED) && sp->sa_addr.pppoe.sid) 640 if ((sk->sk_state & PPPOX_CONNECTED) &&
641 stage_session(sp->sa_addr.pppoe.sid))
586 goto end; 642 goto end;
587 643
588 /* Check for already disconnected sockets, on attempts to disconnect */ 644 /* Check for already disconnected sockets, on attempts to disconnect */
589 error = -EALREADY; 645 error = -EALREADY;
590 if ((sk->sk_state & PPPOX_DEAD) && !sp->sa_addr.pppoe.sid ) 646 if ((sk->sk_state & PPPOX_DEAD) &&
647 !stage_session(sp->sa_addr.pppoe.sid))
591 goto end; 648 goto end;
592 649
593 error = 0; 650 error = 0;
594 if (po->pppoe_pa.sid) {
595 pppox_unbind_sock(sk);
596 651
597 /* Delete the old binding */ 652 /* Delete the old binding */
598 delete_item(po->pppoe_pa.sid,po->pppoe_pa.remote,po->pppoe_ifindex); 653 if (stage_session(po->pppoe_pa.sid)) {
599 654 pppox_unbind_sock(sk);
600 if(po->pppoe_dev) 655 if (po->pppoe_dev) {
656 pn = pppoe_pernet(dev_net(po->pppoe_dev));
657 delete_item(pn, po->pppoe_pa.sid,
658 po->pppoe_pa.remote, po->pppoe_ifindex);
601 dev_put(po->pppoe_dev); 659 dev_put(po->pppoe_dev);
602 660 }
603 memset(sk_pppox(po) + 1, 0, 661 memset(sk_pppox(po) + 1, 0,
604 sizeof(struct pppox_sock) - sizeof(struct sock)); 662 sizeof(struct pppox_sock) - sizeof(struct sock));
605
606 sk->sk_state = PPPOX_NONE; 663 sk->sk_state = PPPOX_NONE;
607 } 664 }
608 665
609 /* Don't re-bind if sid==0 */ 666 /* Re-bind in session stage only */
610 if (sp->sa_addr.pppoe.sid != 0) { 667 if (stage_session(sp->sa_addr.pppoe.sid)) {
611 dev = dev_get_by_name(&init_net, sp->sa_addr.pppoe.dev);
612
613 error = -ENODEV; 668 error = -ENODEV;
669 dev = dev_get_by_name(sock_net(sk), sp->sa_addr.pppoe.dev);
614 if (!dev) 670 if (!dev)
615 goto end; 671 goto end;
616 672
617 po->pppoe_dev = dev; 673 po->pppoe_dev = dev;
618 po->pppoe_ifindex = dev->ifindex; 674 po->pppoe_ifindex = dev->ifindex;
619 675 pn = pppoe_pernet(dev_net(dev));
620 write_lock_bh(&pppoe_hash_lock); 676 write_lock_bh(&pn->hash_lock);
621 if (!(dev->flags & IFF_UP)){ 677 if (!(dev->flags & IFF_UP)) {
622 write_unlock_bh(&pppoe_hash_lock); 678 write_unlock_bh(&pn->hash_lock);
623 goto err_put; 679 goto err_put;
624 } 680 }
625 681
@@ -627,8 +683,8 @@ static int pppoe_connect(struct socket *sock, struct sockaddr *uservaddr,
627 &sp->sa_addr.pppoe, 683 &sp->sa_addr.pppoe,
628 sizeof(struct pppoe_addr)); 684 sizeof(struct pppoe_addr));
629 685
630 error = __set_item(po); 686 error = __set_item(pn, po);
631 write_unlock_bh(&pppoe_hash_lock); 687 write_unlock_bh(&pn->hash_lock);
632 if (error < 0) 688 if (error < 0)
633 goto err_put; 689 goto err_put;
634 690
@@ -639,7 +695,7 @@ static int pppoe_connect(struct socket *sock, struct sockaddr *uservaddr,
639 po->chan.private = sk; 695 po->chan.private = sk;
640 po->chan.ops = &pppoe_chan_ops; 696 po->chan.ops = &pppoe_chan_ops;
641 697
642 error = ppp_register_channel(&po->chan); 698 error = ppp_register_net_channel(dev_net(dev), &po->chan);
643 if (error) 699 if (error)
644 goto err_put; 700 goto err_put;
645 701
@@ -648,7 +704,7 @@ static int pppoe_connect(struct socket *sock, struct sockaddr *uservaddr,
648 704
649 po->num = sp->sa_addr.pppoe.sid; 705 po->num = sp->sa_addr.pppoe.sid;
650 706
651 end: 707end:
652 release_sock(sk); 708 release_sock(sk);
653 return error; 709 return error;
654err_put: 710err_put:
@@ -659,7 +715,6 @@ err_put:
659 goto end; 715 goto end;
660} 716}
661 717
662
663static int pppoe_getname(struct socket *sock, struct sockaddr *uaddr, 718static int pppoe_getname(struct socket *sock, struct sockaddr *uaddr,
664 int *usockaddr_len, int peer) 719 int *usockaddr_len, int peer)
665{ 720{
@@ -678,7 +733,6 @@ static int pppoe_getname(struct socket *sock, struct sockaddr *uaddr,
678 return 0; 733 return 0;
679} 734}
680 735
681
682static int pppoe_ioctl(struct socket *sock, unsigned int cmd, 736static int pppoe_ioctl(struct socket *sock, unsigned int cmd,
683 unsigned long arg) 737 unsigned long arg)
684{ 738{
@@ -690,7 +744,6 @@ static int pppoe_ioctl(struct socket *sock, unsigned int cmd,
690 switch (cmd) { 744 switch (cmd) {
691 case PPPIOCGMRU: 745 case PPPIOCGMRU:
692 err = -ENXIO; 746 err = -ENXIO;
693
694 if (!(sk->sk_state & PPPOX_CONNECTED)) 747 if (!(sk->sk_state & PPPOX_CONNECTED))
695 break; 748 break;
696 749
@@ -698,7 +751,7 @@ static int pppoe_ioctl(struct socket *sock, unsigned int cmd,
698 if (put_user(po->pppoe_dev->mtu - 751 if (put_user(po->pppoe_dev->mtu -
699 sizeof(struct pppoe_hdr) - 752 sizeof(struct pppoe_hdr) -
700 PPP_HDRLEN, 753 PPP_HDRLEN,
701 (int __user *) arg)) 754 (int __user *)arg))
702 break; 755 break;
703 err = 0; 756 err = 0;
704 break; 757 break;
@@ -709,7 +762,7 @@ static int pppoe_ioctl(struct socket *sock, unsigned int cmd,
709 break; 762 break;
710 763
711 err = -EFAULT; 764 err = -EFAULT;
712 if (get_user(val,(int __user *) arg)) 765 if (get_user(val, (int __user *)arg))
713 break; 766 break;
714 767
715 if (val < (po->pppoe_dev->mtu 768 if (val < (po->pppoe_dev->mtu
@@ -722,7 +775,7 @@ static int pppoe_ioctl(struct socket *sock, unsigned int cmd,
722 775
723 case PPPIOCSFLAGS: 776 case PPPIOCSFLAGS:
724 err = -EFAULT; 777 err = -EFAULT;
725 if (get_user(val, (int __user *) arg)) 778 if (get_user(val, (int __user *)arg))
726 break; 779 break;
727 err = 0; 780 err = 0;
728 break; 781 break;
@@ -749,13 +802,12 @@ static int pppoe_ioctl(struct socket *sock, unsigned int cmd,
749 802
750 err = -EINVAL; 803 err = -EINVAL;
751 if (po->pppoe_relay.sa_family != AF_PPPOX || 804 if (po->pppoe_relay.sa_family != AF_PPPOX ||
752 po->pppoe_relay.sa_protocol!= PX_PROTO_OE) 805 po->pppoe_relay.sa_protocol != PX_PROTO_OE)
753 break; 806 break;
754 807
755 /* Check that the socket referenced by the address 808 /* Check that the socket referenced by the address
756 actually exists. */ 809 actually exists. */
757 relay_po = get_item_by_addr(&po->pppoe_relay); 810 relay_po = get_item_by_addr(sock_net(sk), &po->pppoe_relay);
758
759 if (!relay_po) 811 if (!relay_po)
760 break; 812 break;
761 813
@@ -781,7 +833,6 @@ static int pppoe_ioctl(struct socket *sock, unsigned int cmd,
781 return err; 833 return err;
782} 834}
783 835
784
785static int pppoe_sendmsg(struct kiocb *iocb, struct socket *sock, 836static int pppoe_sendmsg(struct kiocb *iocb, struct socket *sock,
786 struct msghdr *m, size_t total_len) 837 struct msghdr *m, size_t total_len)
787{ 838{
@@ -808,7 +859,7 @@ static int pppoe_sendmsg(struct kiocb *iocb, struct socket *sock,
808 dev = po->pppoe_dev; 859 dev = po->pppoe_dev;
809 860
810 error = -EMSGSIZE; 861 error = -EMSGSIZE;
811 if (total_len > (dev->mtu + dev->hard_header_len)) 862 if (total_len > (dev->mtu + dev->hard_header_len))
812 goto end; 863 goto end;
813 864
814 865
@@ -828,11 +879,10 @@ static int pppoe_sendmsg(struct kiocb *iocb, struct socket *sock,
828 skb->priority = sk->sk_priority; 879 skb->priority = sk->sk_priority;
829 skb->protocol = __constant_htons(ETH_P_PPP_SES); 880 skb->protocol = __constant_htons(ETH_P_PPP_SES);
830 881
831 ph = (struct pppoe_hdr *) skb_put(skb, total_len + sizeof(struct pppoe_hdr)); 882 ph = (struct pppoe_hdr *)skb_put(skb, total_len + sizeof(struct pppoe_hdr));
832 start = (char *) &ph->tag[0]; 883 start = (char *)&ph->tag[0];
833 884
834 error = memcpy_fromiovec(start, m->msg_iov, total_len); 885 error = memcpy_fromiovec(start, m->msg_iov, total_len);
835
836 if (error < 0) { 886 if (error < 0) {
837 kfree_skb(skb); 887 kfree_skb(skb);
838 goto end; 888 goto end;
@@ -853,7 +903,6 @@ end:
853 return error; 903 return error;
854} 904}
855 905
856
857/************************************************************************ 906/************************************************************************
858 * 907 *
859 * xmit function for internal use. 908 * xmit function for internal use.
@@ -903,7 +952,6 @@ abort:
903 return 1; 952 return 1;
904} 953}
905 954
906
907/************************************************************************ 955/************************************************************************
908 * 956 *
909 * xmit function called by generic PPP driver 957 * xmit function called by generic PPP driver
@@ -912,11 +960,10 @@ abort:
912 ***********************************************************************/ 960 ***********************************************************************/
913static int pppoe_xmit(struct ppp_channel *chan, struct sk_buff *skb) 961static int pppoe_xmit(struct ppp_channel *chan, struct sk_buff *skb)
914{ 962{
915 struct sock *sk = (struct sock *) chan->private; 963 struct sock *sk = (struct sock *)chan->private;
916 return __pppoe_xmit(sk, skb); 964 return __pppoe_xmit(sk, skb);
917} 965}
918 966
919
920static struct ppp_channel_ops pppoe_chan_ops = { 967static struct ppp_channel_ops pppoe_chan_ops = {
921 .start_xmit = pppoe_xmit, 968 .start_xmit = pppoe_xmit,
922}; 969};
@@ -935,7 +982,6 @@ static int pppoe_recvmsg(struct kiocb *iocb, struct socket *sock,
935 982
936 skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT, 983 skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT,
937 flags & MSG_DONTWAIT, &error); 984 flags & MSG_DONTWAIT, &error);
938
939 if (error < 0) 985 if (error < 0)
940 goto end; 986 goto end;
941 987
@@ -968,44 +1014,47 @@ static int pppoe_seq_show(struct seq_file *seq, void *v)
968 dev_name = po->pppoe_pa.dev; 1014 dev_name = po->pppoe_pa.dev;
969 1015
970 seq_printf(seq, "%08X %pM %8s\n", 1016 seq_printf(seq, "%08X %pM %8s\n",
971 po->pppoe_pa.sid, po->pppoe_pa.remote, dev_name); 1017 po->pppoe_pa.sid, po->pppoe_pa.remote, dev_name);
972out: 1018out:
973 return 0; 1019 return 0;
974} 1020}
975 1021
976static __inline__ struct pppox_sock *pppoe_get_idx(loff_t pos) 1022static inline struct pppox_sock *pppoe_get_idx(struct pppoe_net *pn, loff_t pos)
977{ 1023{
978 struct pppox_sock *po; 1024 struct pppox_sock *po;
979 int i = 0; 1025 int i;
980 1026
981 for (; i < PPPOE_HASH_SIZE; i++) { 1027 for (i = 0; i < PPPOE_HASH_SIZE; i++) {
982 po = item_hash_table[i]; 1028 po = pn->hash_table[i];
983 while (po) { 1029 while (po) {
984 if (!pos--) 1030 if (!pos--)
985 goto out; 1031 goto out;
986 po = po->next; 1032 po = po->next;
987 } 1033 }
988 } 1034 }
1035
989out: 1036out:
990 return po; 1037 return po;
991} 1038}
992 1039
993static void *pppoe_seq_start(struct seq_file *seq, loff_t *pos) 1040static void *pppoe_seq_start(struct seq_file *seq, loff_t *pos)
994 __acquires(pppoe_hash_lock) 1041 __acquires(pn->hash_lock)
995{ 1042{
1043 struct pppoe_net *pn = pppoe_pernet(seq->private);
996 loff_t l = *pos; 1044 loff_t l = *pos;
997 1045
998 read_lock_bh(&pppoe_hash_lock); 1046 read_lock_bh(&pn->hash_lock);
999 return l ? pppoe_get_idx(--l) : SEQ_START_TOKEN; 1047 return l ? pppoe_get_idx(pn, --l) : SEQ_START_TOKEN;
1000} 1048}
1001 1049
1002static void *pppoe_seq_next(struct seq_file *seq, void *v, loff_t *pos) 1050static void *pppoe_seq_next(struct seq_file *seq, void *v, loff_t *pos)
1003{ 1051{
1052 struct pppoe_net *pn = pppoe_pernet(seq->private);
1004 struct pppox_sock *po; 1053 struct pppox_sock *po;
1005 1054
1006 ++*pos; 1055 ++*pos;
1007 if (v == SEQ_START_TOKEN) { 1056 if (v == SEQ_START_TOKEN) {
1008 po = pppoe_get_idx(0); 1057 po = pppoe_get_idx(pn, 0);
1009 goto out; 1058 goto out;
1010 } 1059 }
1011 po = v; 1060 po = v;
@@ -1015,22 +1064,24 @@ static void *pppoe_seq_next(struct seq_file *seq, void *v, loff_t *pos)
1015 int hash = hash_item(po->pppoe_pa.sid, po->pppoe_pa.remote); 1064 int hash = hash_item(po->pppoe_pa.sid, po->pppoe_pa.remote);
1016 1065
1017 while (++hash < PPPOE_HASH_SIZE) { 1066 while (++hash < PPPOE_HASH_SIZE) {
1018 po = item_hash_table[hash]; 1067 po = pn->hash_table[hash];
1019 if (po) 1068 if (po)
1020 break; 1069 break;
1021 } 1070 }
1022 } 1071 }
1072
1023out: 1073out:
1024 return po; 1074 return po;
1025} 1075}
1026 1076
1027static void pppoe_seq_stop(struct seq_file *seq, void *v) 1077static void pppoe_seq_stop(struct seq_file *seq, void *v)
1028 __releases(pppoe_hash_lock) 1078 __releases(pn->hash_lock)
1029{ 1079{
1030 read_unlock_bh(&pppoe_hash_lock); 1080 struct pppoe_net *pn = pppoe_pernet(seq->private);
1081 read_unlock_bh(&pn->hash_lock);
1031} 1082}
1032 1083
1033static struct seq_operations pppoe_seq_ops = { 1084static const struct seq_operations pppoe_seq_ops = {
1034 .start = pppoe_seq_start, 1085 .start = pppoe_seq_start,
1035 .next = pppoe_seq_next, 1086 .next = pppoe_seq_next,
1036 .stop = pppoe_seq_stop, 1087 .stop = pppoe_seq_stop,
@@ -1039,7 +1090,30 @@ static struct seq_operations pppoe_seq_ops = {
1039 1090
1040static int pppoe_seq_open(struct inode *inode, struct file *file) 1091static int pppoe_seq_open(struct inode *inode, struct file *file)
1041{ 1092{
1042 return seq_open(file, &pppoe_seq_ops); 1093 struct seq_file *m;
1094 struct net *net;
1095 int err;
1096
1097 err = seq_open(file, &pppoe_seq_ops);
1098 if (err)
1099 return err;
1100
1101 m = file->private_data;
1102 net = maybe_get_net(PDE_NET(PDE(inode)));
1103 BUG_ON(!net);
1104 m->private = net;
1105
1106 return err;
1107}
1108
1109static int pppoe_seq_release(struct inode *inode, struct file *file)
1110{
1111 struct seq_file *m;
1112
1113 m = file->private_data;
1114 put_net((struct net*)m->private);
1115
1116 return seq_release(inode, file);
1043} 1117}
1044 1118
1045static const struct file_operations pppoe_seq_fops = { 1119static const struct file_operations pppoe_seq_fops = {
@@ -1047,74 +1121,115 @@ static const struct file_operations pppoe_seq_fops = {
1047 .open = pppoe_seq_open, 1121 .open = pppoe_seq_open,
1048 .read = seq_read, 1122 .read = seq_read,
1049 .llseek = seq_lseek, 1123 .llseek = seq_lseek,
1050 .release = seq_release, 1124 .release = pppoe_seq_release,
1051}; 1125};
1052 1126
1053static int __init pppoe_proc_init(void)
1054{
1055 struct proc_dir_entry *p;
1056
1057 p = proc_net_fops_create(&init_net, "pppoe", S_IRUGO, &pppoe_seq_fops);
1058 if (!p)
1059 return -ENOMEM;
1060 return 0;
1061}
1062#else /* CONFIG_PROC_FS */
1063static inline int pppoe_proc_init(void) { return 0; }
1064#endif /* CONFIG_PROC_FS */ 1127#endif /* CONFIG_PROC_FS */
1065 1128
1066static const struct proto_ops pppoe_ops = { 1129static const struct proto_ops pppoe_ops = {
1067 .family = AF_PPPOX, 1130 .family = AF_PPPOX,
1068 .owner = THIS_MODULE, 1131 .owner = THIS_MODULE,
1069 .release = pppoe_release, 1132 .release = pppoe_release,
1070 .bind = sock_no_bind, 1133 .bind = sock_no_bind,
1071 .connect = pppoe_connect, 1134 .connect = pppoe_connect,
1072 .socketpair = sock_no_socketpair, 1135 .socketpair = sock_no_socketpair,
1073 .accept = sock_no_accept, 1136 .accept = sock_no_accept,
1074 .getname = pppoe_getname, 1137 .getname = pppoe_getname,
1075 .poll = datagram_poll, 1138 .poll = datagram_poll,
1076 .listen = sock_no_listen, 1139 .listen = sock_no_listen,
1077 .shutdown = sock_no_shutdown, 1140 .shutdown = sock_no_shutdown,
1078 .setsockopt = sock_no_setsockopt, 1141 .setsockopt = sock_no_setsockopt,
1079 .getsockopt = sock_no_getsockopt, 1142 .getsockopt = sock_no_getsockopt,
1080 .sendmsg = pppoe_sendmsg, 1143 .sendmsg = pppoe_sendmsg,
1081 .recvmsg = pppoe_recvmsg, 1144 .recvmsg = pppoe_recvmsg,
1082 .mmap = sock_no_mmap, 1145 .mmap = sock_no_mmap,
1083 .ioctl = pppox_ioctl, 1146 .ioctl = pppox_ioctl,
1084}; 1147};
1085 1148
1086static struct pppox_proto pppoe_proto = { 1149static struct pppox_proto pppoe_proto = {
1087 .create = pppoe_create, 1150 .create = pppoe_create,
1088 .ioctl = pppoe_ioctl, 1151 .ioctl = pppoe_ioctl,
1089 .owner = THIS_MODULE, 1152 .owner = THIS_MODULE,
1090}; 1153};
1091 1154
1155static __net_init int pppoe_init_net(struct net *net)
1156{
1157 struct pppoe_net *pn;
1158 struct proc_dir_entry *pde;
1159 int err;
1160
1161 pn = kzalloc(sizeof(*pn), GFP_KERNEL);
1162 if (!pn)
1163 return -ENOMEM;
1164
1165 rwlock_init(&pn->hash_lock);
1166
1167 err = net_assign_generic(net, pppoe_net_id, pn);
1168 if (err)
1169 goto out;
1170
1171 pde = proc_net_fops_create(net, "pppoe", S_IRUGO, &pppoe_seq_fops);
1172#ifdef CONFIG_PROC_FS
1173 if (!pde) {
1174 err = -ENOMEM;
1175 goto out;
1176 }
1177#endif
1178
1179 return 0;
1180
1181out:
1182 kfree(pn);
1183 return err;
1184}
1185
1186static __net_exit void pppoe_exit_net(struct net *net)
1187{
1188 struct pppoe_net *pn;
1189
1190 proc_net_remove(net, "pppoe");
1191 pn = net_generic(net, pppoe_net_id);
1192 /*
1193 * if someone has cached our net then
1194 * further net_generic call will return NULL
1195 */
1196 net_assign_generic(net, pppoe_net_id, NULL);
1197 kfree(pn);
1198}
1199
1200static __net_initdata struct pernet_operations pppoe_net_ops = {
1201 .init = pppoe_init_net,
1202 .exit = pppoe_exit_net,
1203};
1092 1204
1093static int __init pppoe_init(void) 1205static int __init pppoe_init(void)
1094{ 1206{
1095 int err = proto_register(&pppoe_sk_proto, 0); 1207 int err;
1096 1208
1209 err = proto_register(&pppoe_sk_proto, 0);
1097 if (err) 1210 if (err)
1098 goto out; 1211 goto out;
1099 1212
1100 err = register_pppox_proto(PX_PROTO_OE, &pppoe_proto); 1213 err = register_pppox_proto(PX_PROTO_OE, &pppoe_proto);
1101 if (err) 1214 if (err)
1102 goto out_unregister_pppoe_proto; 1215 goto out_unregister_pppoe_proto;
1103 1216
1104 err = pppoe_proc_init(); 1217 err = register_pernet_gen_device(&pppoe_net_id, &pppoe_net_ops);
1105 if (err) 1218 if (err)
1106 goto out_unregister_pppox_proto; 1219 goto out_unregister_pppox_proto;
1107 1220
1108 dev_add_pack(&pppoes_ptype); 1221 dev_add_pack(&pppoes_ptype);
1109 dev_add_pack(&pppoed_ptype); 1222 dev_add_pack(&pppoed_ptype);
1110 register_netdevice_notifier(&pppoe_notifier); 1223 register_netdevice_notifier(&pppoe_notifier);
1111out: 1224
1112 return err; 1225 return 0;
1226
1113out_unregister_pppox_proto: 1227out_unregister_pppox_proto:
1114 unregister_pppox_proto(PX_PROTO_OE); 1228 unregister_pppox_proto(PX_PROTO_OE);
1115out_unregister_pppoe_proto: 1229out_unregister_pppoe_proto:
1116 proto_unregister(&pppoe_sk_proto); 1230 proto_unregister(&pppoe_sk_proto);
1117 goto out; 1231out:
1232 return err;
1118} 1233}
1119 1234
1120static void __exit pppoe_exit(void) 1235static void __exit pppoe_exit(void)
@@ -1123,7 +1238,7 @@ static void __exit pppoe_exit(void)
1123 dev_remove_pack(&pppoes_ptype); 1238 dev_remove_pack(&pppoes_ptype);
1124 dev_remove_pack(&pppoed_ptype); 1239 dev_remove_pack(&pppoed_ptype);
1125 unregister_netdevice_notifier(&pppoe_notifier); 1240 unregister_netdevice_notifier(&pppoe_notifier);
1126 remove_proc_entry("pppoe", init_net.proc_net); 1241 unregister_pernet_gen_device(pppoe_net_id, &pppoe_net_ops);
1127 proto_unregister(&pppoe_sk_proto); 1242 proto_unregister(&pppoe_sk_proto);
1128} 1243}
1129 1244
diff --git a/drivers/net/pppol2tp.c b/drivers/net/pppol2tp.c
index f1a946785c6a..056e22a784b8 100644
--- a/drivers/net/pppol2tp.c
+++ b/drivers/net/pppol2tp.c
@@ -90,7 +90,9 @@
90#include <linux/hash.h> 90#include <linux/hash.h>
91#include <linux/sort.h> 91#include <linux/sort.h>
92#include <linux/proc_fs.h> 92#include <linux/proc_fs.h>
93#include <linux/nsproxy.h>
93#include <net/net_namespace.h> 94#include <net/net_namespace.h>
95#include <net/netns/generic.h>
94#include <net/dst.h> 96#include <net/dst.h>
95#include <net/ip.h> 97#include <net/ip.h>
96#include <net/udp.h> 98#include <net/udp.h>
@@ -204,6 +206,7 @@ struct pppol2tp_tunnel
204 struct sock *sock; /* Parent socket */ 206 struct sock *sock; /* Parent socket */
205 struct list_head list; /* Keep a list of all open 207 struct list_head list; /* Keep a list of all open
206 * prepared sockets */ 208 * prepared sockets */
209 struct net *pppol2tp_net; /* the net we belong to */
207 210
208 atomic_t ref_count; 211 atomic_t ref_count;
209}; 212};
@@ -227,8 +230,20 @@ static atomic_t pppol2tp_tunnel_count;
227static atomic_t pppol2tp_session_count; 230static atomic_t pppol2tp_session_count;
228static struct ppp_channel_ops pppol2tp_chan_ops = { pppol2tp_xmit , NULL }; 231static struct ppp_channel_ops pppol2tp_chan_ops = { pppol2tp_xmit , NULL };
229static struct proto_ops pppol2tp_ops; 232static struct proto_ops pppol2tp_ops;
230static LIST_HEAD(pppol2tp_tunnel_list); 233
231static DEFINE_RWLOCK(pppol2tp_tunnel_list_lock); 234/* per-net private data for this module */
235static unsigned int pppol2tp_net_id;
236struct pppol2tp_net {
237 struct list_head pppol2tp_tunnel_list;
238 rwlock_t pppol2tp_tunnel_list_lock;
239};
240
241static inline struct pppol2tp_net *pppol2tp_pernet(struct net *net)
242{
243 BUG_ON(!net);
244
245 return net_generic(net, pppol2tp_net_id);
246}
232 247
233/* Helpers to obtain tunnel/session contexts from sockets. 248/* Helpers to obtain tunnel/session contexts from sockets.
234 */ 249 */
@@ -321,18 +336,19 @@ pppol2tp_session_find(struct pppol2tp_tunnel *tunnel, u16 session_id)
321 336
322/* Lookup a tunnel by id 337/* Lookup a tunnel by id
323 */ 338 */
324static struct pppol2tp_tunnel *pppol2tp_tunnel_find(u16 tunnel_id) 339static struct pppol2tp_tunnel *pppol2tp_tunnel_find(struct net *net, u16 tunnel_id)
325{ 340{
326 struct pppol2tp_tunnel *tunnel = NULL; 341 struct pppol2tp_tunnel *tunnel;
342 struct pppol2tp_net *pn = pppol2tp_pernet(net);
327 343
328 read_lock_bh(&pppol2tp_tunnel_list_lock); 344 read_lock_bh(&pn->pppol2tp_tunnel_list_lock);
329 list_for_each_entry(tunnel, &pppol2tp_tunnel_list, list) { 345 list_for_each_entry(tunnel, &pn->pppol2tp_tunnel_list, list) {
330 if (tunnel->stats.tunnel_id == tunnel_id) { 346 if (tunnel->stats.tunnel_id == tunnel_id) {
331 read_unlock_bh(&pppol2tp_tunnel_list_lock); 347 read_unlock_bh(&pn->pppol2tp_tunnel_list_lock);
332 return tunnel; 348 return tunnel;
333 } 349 }
334 } 350 }
335 read_unlock_bh(&pppol2tp_tunnel_list_lock); 351 read_unlock_bh(&pn->pppol2tp_tunnel_list_lock);
336 352
337 return NULL; 353 return NULL;
338} 354}
@@ -1287,10 +1303,12 @@ again:
1287 */ 1303 */
1288static void pppol2tp_tunnel_free(struct pppol2tp_tunnel *tunnel) 1304static void pppol2tp_tunnel_free(struct pppol2tp_tunnel *tunnel)
1289{ 1305{
1306 struct pppol2tp_net *pn = pppol2tp_pernet(tunnel->pppol2tp_net);
1307
1290 /* Remove from socket list */ 1308 /* Remove from socket list */
1291 write_lock_bh(&pppol2tp_tunnel_list_lock); 1309 write_lock_bh(&pn->pppol2tp_tunnel_list_lock);
1292 list_del_init(&tunnel->list); 1310 list_del_init(&tunnel->list);
1293 write_unlock_bh(&pppol2tp_tunnel_list_lock); 1311 write_unlock_bh(&pn->pppol2tp_tunnel_list_lock);
1294 1312
1295 atomic_dec(&pppol2tp_tunnel_count); 1313 atomic_dec(&pppol2tp_tunnel_count);
1296 kfree(tunnel); 1314 kfree(tunnel);
@@ -1444,13 +1462,14 @@ error:
1444/* Internal function to prepare a tunnel (UDP) socket to have PPPoX 1462/* Internal function to prepare a tunnel (UDP) socket to have PPPoX
1445 * sockets attached to it. 1463 * sockets attached to it.
1446 */ 1464 */
1447static struct sock *pppol2tp_prepare_tunnel_socket(int fd, u16 tunnel_id, 1465static struct sock *pppol2tp_prepare_tunnel_socket(struct net *net,
1448 int *error) 1466 int fd, u16 tunnel_id, int *error)
1449{ 1467{
1450 int err; 1468 int err;
1451 struct socket *sock = NULL; 1469 struct socket *sock = NULL;
1452 struct sock *sk; 1470 struct sock *sk;
1453 struct pppol2tp_tunnel *tunnel; 1471 struct pppol2tp_tunnel *tunnel;
1472 struct pppol2tp_net *pn;
1454 struct sock *ret = NULL; 1473 struct sock *ret = NULL;
1455 1474
1456 /* Get the tunnel UDP socket from the fd, which was opened by 1475 /* Get the tunnel UDP socket from the fd, which was opened by
@@ -1524,11 +1543,15 @@ static struct sock *pppol2tp_prepare_tunnel_socket(int fd, u16 tunnel_id,
1524 /* Misc init */ 1543 /* Misc init */
1525 rwlock_init(&tunnel->hlist_lock); 1544 rwlock_init(&tunnel->hlist_lock);
1526 1545
1546 /* The net we belong to */
1547 tunnel->pppol2tp_net = net;
1548 pn = pppol2tp_pernet(net);
1549
1527 /* Add tunnel to our list */ 1550 /* Add tunnel to our list */
1528 INIT_LIST_HEAD(&tunnel->list); 1551 INIT_LIST_HEAD(&tunnel->list);
1529 write_lock_bh(&pppol2tp_tunnel_list_lock); 1552 write_lock_bh(&pn->pppol2tp_tunnel_list_lock);
1530 list_add(&tunnel->list, &pppol2tp_tunnel_list); 1553 list_add(&tunnel->list, &pn->pppol2tp_tunnel_list);
1531 write_unlock_bh(&pppol2tp_tunnel_list_lock); 1554 write_unlock_bh(&pn->pppol2tp_tunnel_list_lock);
1532 atomic_inc(&pppol2tp_tunnel_count); 1555 atomic_inc(&pppol2tp_tunnel_count);
1533 1556
1534 /* Bump the reference count. The tunnel context is deleted 1557 /* Bump the reference count. The tunnel context is deleted
@@ -1629,7 +1652,8 @@ static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr,
1629 * tunnel id. 1652 * tunnel id.
1630 */ 1653 */
1631 if ((sp->pppol2tp.s_session == 0) && (sp->pppol2tp.d_session == 0)) { 1654 if ((sp->pppol2tp.s_session == 0) && (sp->pppol2tp.d_session == 0)) {
1632 tunnel_sock = pppol2tp_prepare_tunnel_socket(sp->pppol2tp.fd, 1655 tunnel_sock = pppol2tp_prepare_tunnel_socket(sock_net(sk),
1656 sp->pppol2tp.fd,
1633 sp->pppol2tp.s_tunnel, 1657 sp->pppol2tp.s_tunnel,
1634 &error); 1658 &error);
1635 if (tunnel_sock == NULL) 1659 if (tunnel_sock == NULL)
@@ -1637,7 +1661,7 @@ static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr,
1637 1661
1638 tunnel = tunnel_sock->sk_user_data; 1662 tunnel = tunnel_sock->sk_user_data;
1639 } else { 1663 } else {
1640 tunnel = pppol2tp_tunnel_find(sp->pppol2tp.s_tunnel); 1664 tunnel = pppol2tp_tunnel_find(sock_net(sk), sp->pppol2tp.s_tunnel);
1641 1665
1642 /* Error if we can't find the tunnel */ 1666 /* Error if we can't find the tunnel */
1643 error = -ENOENT; 1667 error = -ENOENT;
@@ -1725,7 +1749,7 @@ static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr,
1725 po->chan.ops = &pppol2tp_chan_ops; 1749 po->chan.ops = &pppol2tp_chan_ops;
1726 po->chan.mtu = session->mtu; 1750 po->chan.mtu = session->mtu;
1727 1751
1728 error = ppp_register_channel(&po->chan); 1752 error = ppp_register_net_channel(sock_net(sk), &po->chan);
1729 if (error) 1753 if (error)
1730 goto end_put_tun; 1754 goto end_put_tun;
1731 1755
@@ -2347,8 +2371,9 @@ end:
2347#include <linux/seq_file.h> 2371#include <linux/seq_file.h>
2348 2372
2349struct pppol2tp_seq_data { 2373struct pppol2tp_seq_data {
2350 struct pppol2tp_tunnel *tunnel; /* current tunnel */ 2374 struct net *seq_net; /* net of inode */
2351 struct pppol2tp_session *session; /* NULL means get first session in tunnel */ 2375 struct pppol2tp_tunnel *tunnel; /* current tunnel */
2376 struct pppol2tp_session *session; /* NULL means get first session in tunnel */
2352}; 2377};
2353 2378
2354static struct pppol2tp_session *next_session(struct pppol2tp_tunnel *tunnel, struct pppol2tp_session *curr) 2379static struct pppol2tp_session *next_session(struct pppol2tp_tunnel *tunnel, struct pppol2tp_session *curr)
@@ -2384,17 +2409,18 @@ out:
2384 return session; 2409 return session;
2385} 2410}
2386 2411
2387static struct pppol2tp_tunnel *next_tunnel(struct pppol2tp_tunnel *curr) 2412static struct pppol2tp_tunnel *next_tunnel(struct pppol2tp_net *pn,
2413 struct pppol2tp_tunnel *curr)
2388{ 2414{
2389 struct pppol2tp_tunnel *tunnel = NULL; 2415 struct pppol2tp_tunnel *tunnel = NULL;
2390 2416
2391 read_lock_bh(&pppol2tp_tunnel_list_lock); 2417 read_lock_bh(&pn->pppol2tp_tunnel_list_lock);
2392 if (list_is_last(&curr->list, &pppol2tp_tunnel_list)) { 2418 if (list_is_last(&curr->list, &pn->pppol2tp_tunnel_list)) {
2393 goto out; 2419 goto out;
2394 } 2420 }
2395 tunnel = list_entry(curr->list.next, struct pppol2tp_tunnel, list); 2421 tunnel = list_entry(curr->list.next, struct pppol2tp_tunnel, list);
2396out: 2422out:
2397 read_unlock_bh(&pppol2tp_tunnel_list_lock); 2423 read_unlock_bh(&pn->pppol2tp_tunnel_list_lock);
2398 2424
2399 return tunnel; 2425 return tunnel;
2400} 2426}
@@ -2402,6 +2428,7 @@ out:
2402static void *pppol2tp_seq_start(struct seq_file *m, loff_t *offs) 2428static void *pppol2tp_seq_start(struct seq_file *m, loff_t *offs)
2403{ 2429{
2404 struct pppol2tp_seq_data *pd = SEQ_START_TOKEN; 2430 struct pppol2tp_seq_data *pd = SEQ_START_TOKEN;
2431 struct pppol2tp_net *pn;
2405 loff_t pos = *offs; 2432 loff_t pos = *offs;
2406 2433
2407 if (!pos) 2434 if (!pos)
@@ -2409,14 +2436,15 @@ static void *pppol2tp_seq_start(struct seq_file *m, loff_t *offs)
2409 2436
2410 BUG_ON(m->private == NULL); 2437 BUG_ON(m->private == NULL);
2411 pd = m->private; 2438 pd = m->private;
2439 pn = pppol2tp_pernet(pd->seq_net);
2412 2440
2413 if (pd->tunnel == NULL) { 2441 if (pd->tunnel == NULL) {
2414 if (!list_empty(&pppol2tp_tunnel_list)) 2442 if (!list_empty(&pn->pppol2tp_tunnel_list))
2415 pd->tunnel = list_entry(pppol2tp_tunnel_list.next, struct pppol2tp_tunnel, list); 2443 pd->tunnel = list_entry(pn->pppol2tp_tunnel_list.next, struct pppol2tp_tunnel, list);
2416 } else { 2444 } else {
2417 pd->session = next_session(pd->tunnel, pd->session); 2445 pd->session = next_session(pd->tunnel, pd->session);
2418 if (pd->session == NULL) { 2446 if (pd->session == NULL) {
2419 pd->tunnel = next_tunnel(pd->tunnel); 2447 pd->tunnel = next_tunnel(pn, pd->tunnel);
2420 } 2448 }
2421 } 2449 }
2422 2450
@@ -2517,7 +2545,7 @@ out:
2517 return 0; 2545 return 0;
2518} 2546}
2519 2547
2520static struct seq_operations pppol2tp_seq_ops = { 2548static const struct seq_operations pppol2tp_seq_ops = {
2521 .start = pppol2tp_seq_start, 2549 .start = pppol2tp_seq_start,
2522 .next = pppol2tp_seq_next, 2550 .next = pppol2tp_seq_next,
2523 .stop = pppol2tp_seq_stop, 2551 .stop = pppol2tp_seq_stop,
@@ -2532,6 +2560,7 @@ static int pppol2tp_proc_open(struct inode *inode, struct file *file)
2532{ 2560{
2533 struct seq_file *m; 2561 struct seq_file *m;
2534 struct pppol2tp_seq_data *pd; 2562 struct pppol2tp_seq_data *pd;
2563 struct net *net;
2535 int ret = 0; 2564 int ret = 0;
2536 2565
2537 ret = seq_open(file, &pppol2tp_seq_ops); 2566 ret = seq_open(file, &pppol2tp_seq_ops);
@@ -2542,12 +2571,15 @@ static int pppol2tp_proc_open(struct inode *inode, struct file *file)
2542 2571
2543 /* Allocate and fill our proc_data for access later */ 2572 /* Allocate and fill our proc_data for access later */
2544 ret = -ENOMEM; 2573 ret = -ENOMEM;
2545 m->private = kzalloc(sizeof(struct pppol2tp_seq_data), GFP_KERNEL); 2574 m->private = kzalloc(sizeof(*pd), GFP_KERNEL);
2546 if (m->private == NULL) 2575 if (m->private == NULL)
2547 goto out; 2576 goto out;
2548 2577
2549 pd = m->private; 2578 pd = m->private;
2550 ret = 0; 2579 net = maybe_get_net(PDE_NET(PDE(inode)));
2580 BUG_ON(!net);
2581 pd->seq_net = net;
2582 return 0;
2551 2583
2552out: 2584out:
2553 return ret; 2585 return ret;
@@ -2558,6 +2590,9 @@ out:
2558static int pppol2tp_proc_release(struct inode *inode, struct file *file) 2590static int pppol2tp_proc_release(struct inode *inode, struct file *file)
2559{ 2591{
2560 struct seq_file *m = (struct seq_file *)file->private_data; 2592 struct seq_file *m = (struct seq_file *)file->private_data;
2593 struct pppol2tp_seq_data *pd = m->private;
2594
2595 put_net(pd->seq_net);
2561 2596
2562 kfree(m->private); 2597 kfree(m->private);
2563 m->private = NULL; 2598 m->private = NULL;
@@ -2565,7 +2600,7 @@ static int pppol2tp_proc_release(struct inode *inode, struct file *file)
2565 return seq_release(inode, file); 2600 return seq_release(inode, file);
2566} 2601}
2567 2602
2568static struct file_operations pppol2tp_proc_fops = { 2603static const struct file_operations pppol2tp_proc_fops = {
2569 .owner = THIS_MODULE, 2604 .owner = THIS_MODULE,
2570 .open = pppol2tp_proc_open, 2605 .open = pppol2tp_proc_open,
2571 .read = seq_read, 2606 .read = seq_read,
@@ -2573,8 +2608,6 @@ static struct file_operations pppol2tp_proc_fops = {
2573 .release = pppol2tp_proc_release, 2608 .release = pppol2tp_proc_release,
2574}; 2609};
2575 2610
2576static struct proc_dir_entry *pppol2tp_proc;
2577
2578#endif /* CONFIG_PROC_FS */ 2611#endif /* CONFIG_PROC_FS */
2579 2612
2580/***************************************************************************** 2613/*****************************************************************************
@@ -2606,6 +2639,57 @@ static struct pppox_proto pppol2tp_proto = {
2606 .ioctl = pppol2tp_ioctl 2639 .ioctl = pppol2tp_ioctl
2607}; 2640};
2608 2641
2642static __net_init int pppol2tp_init_net(struct net *net)
2643{
2644 struct pppol2tp_net *pn;
2645 struct proc_dir_entry *pde;
2646 int err;
2647
2648 pn = kzalloc(sizeof(*pn), GFP_KERNEL);
2649 if (!pn)
2650 return -ENOMEM;
2651
2652 INIT_LIST_HEAD(&pn->pppol2tp_tunnel_list);
2653 rwlock_init(&pn->pppol2tp_tunnel_list_lock);
2654
2655 err = net_assign_generic(net, pppol2tp_net_id, pn);
2656 if (err)
2657 goto out;
2658
2659 pde = proc_net_fops_create(net, "pppol2tp", S_IRUGO, &pppol2tp_proc_fops);
2660#ifdef CONFIG_PROC_FS
2661 if (!pde) {
2662 err = -ENOMEM;
2663 goto out;
2664 }
2665#endif
2666
2667 return 0;
2668
2669out:
2670 kfree(pn);
2671 return err;
2672}
2673
2674static __net_exit void pppol2tp_exit_net(struct net *net)
2675{
 2676 struct pppol2tp_net *pn;
2677
2678 proc_net_remove(net, "pppol2tp");
2679 pn = net_generic(net, pppol2tp_net_id);
2680 /*
2681 * if someone has cached our net then
2682 * further net_generic call will return NULL
2683 */
2684 net_assign_generic(net, pppol2tp_net_id, NULL);
2685 kfree(pn);
2686}
2687
2688static __net_initdata struct pernet_operations pppol2tp_net_ops = {
2689 .init = pppol2tp_init_net,
2690 .exit = pppol2tp_exit_net,
2691};
2692
2609static int __init pppol2tp_init(void) 2693static int __init pppol2tp_init(void)
2610{ 2694{
2611 int err; 2695 int err;
@@ -2617,23 +2701,17 @@ static int __init pppol2tp_init(void)
2617 if (err) 2701 if (err)
2618 goto out_unregister_pppol2tp_proto; 2702 goto out_unregister_pppol2tp_proto;
2619 2703
2620#ifdef CONFIG_PROC_FS 2704 err = register_pernet_gen_device(&pppol2tp_net_id, &pppol2tp_net_ops);
2621 pppol2tp_proc = proc_net_fops_create(&init_net, "pppol2tp", 0, 2705 if (err)
2622 &pppol2tp_proc_fops);
2623 if (!pppol2tp_proc) {
2624 err = -ENOMEM;
2625 goto out_unregister_pppox_proto; 2706 goto out_unregister_pppox_proto;
2626 } 2707
2627#endif /* CONFIG_PROC_FS */
2628 printk(KERN_INFO "PPPoL2TP kernel driver, %s\n", 2708 printk(KERN_INFO "PPPoL2TP kernel driver, %s\n",
2629 PPPOL2TP_DRV_VERSION); 2709 PPPOL2TP_DRV_VERSION);
2630 2710
2631out: 2711out:
2632 return err; 2712 return err;
2633#ifdef CONFIG_PROC_FS
2634out_unregister_pppox_proto: 2713out_unregister_pppox_proto:
2635 unregister_pppox_proto(PX_PROTO_OL2TP); 2714 unregister_pppox_proto(PX_PROTO_OL2TP);
2636#endif
2637out_unregister_pppol2tp_proto: 2715out_unregister_pppol2tp_proto:
2638 proto_unregister(&pppol2tp_sk_proto); 2716 proto_unregister(&pppol2tp_sk_proto);
2639 goto out; 2717 goto out;
@@ -2642,10 +2720,6 @@ out_unregister_pppol2tp_proto:
2642static void __exit pppol2tp_exit(void) 2720static void __exit pppol2tp_exit(void)
2643{ 2721{
2644 unregister_pppox_proto(PX_PROTO_OL2TP); 2722 unregister_pppox_proto(PX_PROTO_OL2TP);
2645
2646#ifdef CONFIG_PROC_FS
2647 remove_proc_entry("pppol2tp", init_net.proc_net);
2648#endif
2649 proto_unregister(&pppol2tp_sk_proto); 2723 proto_unregister(&pppol2tp_sk_proto);
2650} 2724}
2651 2725
diff --git a/drivers/net/pppox.c b/drivers/net/pppox.c
index 03aecc97fb45..4f6d33fbc673 100644
--- a/drivers/net/pppox.c
+++ b/drivers/net/pppox.c
@@ -108,9 +108,6 @@ static int pppox_create(struct net *net, struct socket *sock, int protocol)
108{ 108{
109 int rc = -EPROTOTYPE; 109 int rc = -EPROTOTYPE;
110 110
111 if (net != &init_net)
112 return -EAFNOSUPPORT;
113
114 if (protocol < 0 || protocol > PX_MAX_PROTO) 111 if (protocol < 0 || protocol > PX_MAX_PROTO)
115 goto out; 112 goto out;
116 113
diff --git a/drivers/net/ps3_gelic_net.c b/drivers/net/ps3_gelic_net.c
index 4b564eda5bd9..06649d0c2098 100644
--- a/drivers/net/ps3_gelic_net.c
+++ b/drivers/net/ps3_gelic_net.c
@@ -1403,6 +1403,19 @@ void gelic_net_tx_timeout(struct net_device *netdev)
1403 atomic_dec(&card->tx_timeout_task_counter); 1403 atomic_dec(&card->tx_timeout_task_counter);
1404} 1404}
1405 1405
1406static const struct net_device_ops gelic_netdevice_ops = {
1407 .ndo_open = gelic_net_open,
1408 .ndo_stop = gelic_net_stop,
1409 .ndo_start_xmit = gelic_net_xmit,
1410 .ndo_set_multicast_list = gelic_net_set_multi,
1411 .ndo_change_mtu = gelic_net_change_mtu,
1412 .ndo_tx_timeout = gelic_net_tx_timeout,
1413 .ndo_validate_addr = eth_validate_addr,
1414#ifdef CONFIG_NET_POLL_CONTROLLER
1415 .ndo_poll_controller = gelic_net_poll_controller,
1416#endif
1417};
1418
1406/** 1419/**
1407 * gelic_ether_setup_netdev_ops - initialization of net_device operations 1420 * gelic_ether_setup_netdev_ops - initialization of net_device operations
1408 * @netdev: net_device structure 1421 * @netdev: net_device structure
@@ -1412,21 +1425,12 @@ void gelic_net_tx_timeout(struct net_device *netdev)
1412static void gelic_ether_setup_netdev_ops(struct net_device *netdev, 1425static void gelic_ether_setup_netdev_ops(struct net_device *netdev,
1413 struct napi_struct *napi) 1426 struct napi_struct *napi)
1414{ 1427{
1415 netdev->open = &gelic_net_open;
1416 netdev->stop = &gelic_net_stop;
1417 netdev->hard_start_xmit = &gelic_net_xmit;
1418 netdev->set_multicast_list = &gelic_net_set_multi;
1419 netdev->change_mtu = &gelic_net_change_mtu;
1420 /* tx watchdog */
1421 netdev->tx_timeout = &gelic_net_tx_timeout;
1422 netdev->watchdog_timeo = GELIC_NET_WATCHDOG_TIMEOUT; 1428 netdev->watchdog_timeo = GELIC_NET_WATCHDOG_TIMEOUT;
1423 /* NAPI */ 1429 /* NAPI */
1424 netif_napi_add(netdev, napi, 1430 netif_napi_add(netdev, napi,
1425 gelic_net_poll, GELIC_NET_NAPI_WEIGHT); 1431 gelic_net_poll, GELIC_NET_NAPI_WEIGHT);
1426 netdev->ethtool_ops = &gelic_ether_ethtool_ops; 1432 netdev->ethtool_ops = &gelic_ether_ethtool_ops;
1427#ifdef CONFIG_NET_POLL_CONTROLLER 1433 netdev->netdev_ops = &gelic_netdevice_ops;
1428 netdev->poll_controller = gelic_net_poll_controller;
1429#endif
1430} 1434}
1431 1435
1432/** 1436/**
diff --git a/drivers/net/ps3_gelic_wireless.c b/drivers/net/ps3_gelic_wireless.c
index ec2314246682..708ae067c331 100644
--- a/drivers/net/ps3_gelic_wireless.c
+++ b/drivers/net/ps3_gelic_wireless.c
@@ -2697,6 +2697,19 @@ static int gelic_wl_stop(struct net_device *netdev)
2697 2697
2698/* -- */ 2698/* -- */
2699 2699
2700static const struct net_device_ops gelic_wl_netdevice_ops = {
2701 .ndo_open = gelic_wl_open,
2702 .ndo_stop = gelic_wl_stop,
2703 .ndo_start_xmit = gelic_net_xmit,
2704 .ndo_set_multicast_list = gelic_net_set_multi,
2705 .ndo_change_mtu = gelic_net_change_mtu,
2706 .ndo_tx_timeout = gelic_net_tx_timeout,
2707 .ndo_validate_addr = eth_validate_addr,
2708#ifdef CONFIG_NET_POLL_CONTROLLER
2709 .ndo_poll_controller = gelic_net_poll_controller,
2710#endif
2711};
2712
2700static struct ethtool_ops gelic_wl_ethtool_ops = { 2713static struct ethtool_ops gelic_wl_ethtool_ops = {
2701 .get_drvinfo = gelic_net_get_drvinfo, 2714 .get_drvinfo = gelic_net_get_drvinfo,
2702 .get_link = gelic_wl_get_link, 2715 .get_link = gelic_wl_get_link,
@@ -2711,21 +2724,12 @@ static void gelic_wl_setup_netdev_ops(struct net_device *netdev)
2711 struct gelic_wl_info *wl; 2724 struct gelic_wl_info *wl;
2712 wl = port_wl(netdev_priv(netdev)); 2725 wl = port_wl(netdev_priv(netdev));
2713 BUG_ON(!wl); 2726 BUG_ON(!wl);
2714 netdev->open = &gelic_wl_open;
2715 netdev->stop = &gelic_wl_stop;
2716 netdev->hard_start_xmit = &gelic_net_xmit;
2717 netdev->set_multicast_list = &gelic_net_set_multi;
2718 netdev->change_mtu = &gelic_net_change_mtu;
2719 netdev->wireless_data = &wl->wireless_data;
2720 netdev->wireless_handlers = &gelic_wl_wext_handler_def;
2721 /* tx watchdog */
2722 netdev->tx_timeout = &gelic_net_tx_timeout;
2723 netdev->watchdog_timeo = GELIC_NET_WATCHDOG_TIMEOUT; 2727 netdev->watchdog_timeo = GELIC_NET_WATCHDOG_TIMEOUT;
2724 2728
2725 netdev->ethtool_ops = &gelic_wl_ethtool_ops; 2729 netdev->ethtool_ops = &gelic_wl_ethtool_ops;
2726#ifdef CONFIG_NET_POLL_CONTROLLER 2730 netdev->netdev_ops = &gelic_wl_netdevice_ops;
2727 netdev->poll_controller = gelic_net_poll_controller; 2731 netdev->wireless_data = &wl->wireless_data;
2728#endif 2732 netdev->wireless_handlers = &gelic_wl_wext_handler_def;
2729} 2733}
2730 2734
2731/* 2735/*
diff --git a/drivers/net/qla3xxx.c b/drivers/net/qla3xxx.c
index 189ec29ac7a4..8b2823c8dccf 100644
--- a/drivers/net/qla3xxx.c
+++ b/drivers/net/qla3xxx.c
@@ -2292,7 +2292,7 @@ static int ql_poll(struct napi_struct *napi, int budget)
2292 2292
2293 if (tx_cleaned + rx_cleaned != budget) { 2293 if (tx_cleaned + rx_cleaned != budget) {
2294 spin_lock_irqsave(&qdev->hw_lock, hw_flags); 2294 spin_lock_irqsave(&qdev->hw_lock, hw_flags);
2295 __netif_rx_complete(napi); 2295 __napi_complete(napi);
2296 ql_update_small_bufq_prod_index(qdev); 2296 ql_update_small_bufq_prod_index(qdev);
2297 ql_update_lrg_bufq_prod_index(qdev); 2297 ql_update_lrg_bufq_prod_index(qdev);
2298 writel(qdev->rsp_consumer_index, 2298 writel(qdev->rsp_consumer_index,
@@ -2351,8 +2351,8 @@ static irqreturn_t ql3xxx_isr(int irq, void *dev_id)
2351 spin_unlock(&qdev->adapter_lock); 2351 spin_unlock(&qdev->adapter_lock);
2352 } else if (value & ISP_IMR_DISABLE_CMPL_INT) { 2352 } else if (value & ISP_IMR_DISABLE_CMPL_INT) {
2353 ql_disable_interrupts(qdev); 2353 ql_disable_interrupts(qdev);
2354 if (likely(netif_rx_schedule_prep(&qdev->napi))) { 2354 if (likely(napi_schedule_prep(&qdev->napi))) {
2355 __netif_rx_schedule(&qdev->napi); 2355 __napi_schedule(&qdev->napi);
2356 } 2356 }
2357 } else { 2357 } else {
2358 return IRQ_NONE; 2358 return IRQ_NONE;
diff --git a/drivers/net/qlge/qlge_main.c b/drivers/net/qlge/qlge_main.c
index 45421c8b6010..16eb9dd85286 100644
--- a/drivers/net/qlge/qlge_main.c
+++ b/drivers/net/qlge/qlge_main.c
@@ -1642,7 +1642,7 @@ static int ql_napi_poll_msix(struct napi_struct *napi, int budget)
1642 rx_ring->cq_id); 1642 rx_ring->cq_id);
1643 1643
1644 if (work_done < budget) { 1644 if (work_done < budget) {
1645 __netif_rx_complete(napi); 1645 __napi_complete(napi);
1646 ql_enable_completion_interrupt(qdev, rx_ring->irq); 1646 ql_enable_completion_interrupt(qdev, rx_ring->irq);
1647 } 1647 }
1648 return work_done; 1648 return work_done;
@@ -1727,7 +1727,7 @@ static irqreturn_t qlge_msix_tx_isr(int irq, void *dev_id)
1727static irqreturn_t qlge_msix_rx_isr(int irq, void *dev_id) 1727static irqreturn_t qlge_msix_rx_isr(int irq, void *dev_id)
1728{ 1728{
1729 struct rx_ring *rx_ring = dev_id; 1729 struct rx_ring *rx_ring = dev_id;
1730 netif_rx_schedule(&rx_ring->napi); 1730 napi_schedule(&rx_ring->napi);
1731 return IRQ_HANDLED; 1731 return IRQ_HANDLED;
1732} 1732}
1733 1733
@@ -1813,7 +1813,7 @@ static irqreturn_t qlge_isr(int irq, void *dev_id)
1813 &rx_ring->rx_work, 1813 &rx_ring->rx_work,
1814 0); 1814 0);
1815 else 1815 else
1816 netif_rx_schedule(&rx_ring->napi); 1816 napi_schedule(&rx_ring->napi);
1817 work_done++; 1817 work_done++;
1818 } 1818 }
1819 } 1819 }
diff --git a/drivers/net/r6040.c b/drivers/net/r6040.c
index 72fd9e97c190..cc0f886b0c29 100644
--- a/drivers/net/r6040.c
+++ b/drivers/net/r6040.c
@@ -677,7 +677,7 @@ static int r6040_poll(struct napi_struct *napi, int budget)
677 work_done = r6040_rx(dev, budget); 677 work_done = r6040_rx(dev, budget);
678 678
679 if (work_done < budget) { 679 if (work_done < budget) {
680 netif_rx_complete(napi); 680 napi_complete(napi);
681 /* Enable RX interrupt */ 681 /* Enable RX interrupt */
682 iowrite16(ioread16(ioaddr + MIER) | RX_INTS, ioaddr + MIER); 682 iowrite16(ioread16(ioaddr + MIER) | RX_INTS, ioaddr + MIER);
683 } 683 }
@@ -714,7 +714,7 @@ static irqreturn_t r6040_interrupt(int irq, void *dev_id)
714 714
715 /* Mask off RX interrupt */ 715 /* Mask off RX interrupt */
716 misr &= ~RX_INTS; 716 misr &= ~RX_INTS;
717 netif_rx_schedule(&lp->napi); 717 napi_schedule(&lp->napi);
718 } 718 }
719 719
720 /* TX interrupt request */ 720 /* TX interrupt request */
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
index 2c73ca606b35..1c4a980253fe 100644
--- a/drivers/net/r8169.c
+++ b/drivers/net/r8169.c
@@ -3581,8 +3581,8 @@ static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance)
3581 RTL_W16(IntrMask, tp->intr_event & ~tp->napi_event); 3581 RTL_W16(IntrMask, tp->intr_event & ~tp->napi_event);
3582 tp->intr_mask = ~tp->napi_event; 3582 tp->intr_mask = ~tp->napi_event;
3583 3583
3584 if (likely(netif_rx_schedule_prep(&tp->napi))) 3584 if (likely(napi_schedule_prep(&tp->napi)))
3585 __netif_rx_schedule(&tp->napi); 3585 __napi_schedule(&tp->napi);
3586 else if (netif_msg_intr(tp)) { 3586 else if (netif_msg_intr(tp)) {
3587 printk(KERN_INFO "%s: interrupt %04x in poll\n", 3587 printk(KERN_INFO "%s: interrupt %04x in poll\n",
3588 dev->name, status); 3588 dev->name, status);
@@ -3603,7 +3603,7 @@ static int rtl8169_poll(struct napi_struct *napi, int budget)
3603 rtl8169_tx_interrupt(dev, tp, ioaddr); 3603 rtl8169_tx_interrupt(dev, tp, ioaddr);
3604 3604
3605 if (work_done < budget) { 3605 if (work_done < budget) {
3606 netif_rx_complete(napi); 3606 napi_complete(napi);
3607 tp->intr_mask = 0xffff; 3607 tp->intr_mask = 0xffff;
3608 /* 3608 /*
3609 * 20040426: the barrier is not strictly required but the 3609 * 20040426: the barrier is not strictly required but the
diff --git a/drivers/net/s2io.c b/drivers/net/s2io.c
index f5c57c059bca..2a96a10fd0cf 100644
--- a/drivers/net/s2io.c
+++ b/drivers/net/s2io.c
@@ -2852,7 +2852,7 @@ static int s2io_poll_msix(struct napi_struct *napi, int budget)
2852 s2io_chk_rx_buffers(nic, ring); 2852 s2io_chk_rx_buffers(nic, ring);
2853 2853
2854 if (pkts_processed < budget_org) { 2854 if (pkts_processed < budget_org) {
2855 netif_rx_complete(napi); 2855 napi_complete(napi);
2856 /*Re Enable MSI-Rx Vector*/ 2856 /*Re Enable MSI-Rx Vector*/
2857 addr = (u8 __iomem *)&bar0->xmsi_mask_reg; 2857 addr = (u8 __iomem *)&bar0->xmsi_mask_reg;
2858 addr += 7 - ring->ring_no; 2858 addr += 7 - ring->ring_no;
@@ -2889,7 +2889,7 @@ static int s2io_poll_inta(struct napi_struct *napi, int budget)
2889 break; 2889 break;
2890 } 2890 }
2891 if (pkts_processed < budget_org) { 2891 if (pkts_processed < budget_org) {
2892 netif_rx_complete(napi); 2892 napi_complete(napi);
2893 /* Re enable the Rx interrupts for the ring */ 2893 /* Re enable the Rx interrupts for the ring */
2894 writeq(0, &bar0->rx_traffic_mask); 2894 writeq(0, &bar0->rx_traffic_mask);
2895 readl(&bar0->rx_traffic_mask); 2895 readl(&bar0->rx_traffic_mask);
@@ -4342,7 +4342,7 @@ static irqreturn_t s2io_msix_ring_handle(int irq, void *dev_id)
4342 val8 = (ring->ring_no == 0) ? 0x7f : 0xff; 4342 val8 = (ring->ring_no == 0) ? 0x7f : 0xff;
4343 writeb(val8, addr); 4343 writeb(val8, addr);
4344 val8 = readb(addr); 4344 val8 = readb(addr);
4345 netif_rx_schedule(&ring->napi); 4345 napi_schedule(&ring->napi);
4346 } else { 4346 } else {
4347 rx_intr_handler(ring, 0); 4347 rx_intr_handler(ring, 0);
4348 s2io_chk_rx_buffers(sp, ring); 4348 s2io_chk_rx_buffers(sp, ring);
@@ -4789,7 +4789,7 @@ static irqreturn_t s2io_isr(int irq, void *dev_id)
4789 4789
4790 if (config->napi) { 4790 if (config->napi) {
4791 if (reason & GEN_INTR_RXTRAFFIC) { 4791 if (reason & GEN_INTR_RXTRAFFIC) {
4792 netif_rx_schedule(&sp->napi); 4792 napi_schedule(&sp->napi);
4793 writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_mask); 4793 writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_mask);
4794 writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int); 4794 writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int);
4795 readl(&bar0->rx_traffic_int); 4795 readl(&bar0->rx_traffic_int);
diff --git a/drivers/net/sb1250-mac.c b/drivers/net/sb1250-mac.c
index 31e38fae017f..3e11c1d6d792 100644
--- a/drivers/net/sb1250-mac.c
+++ b/drivers/net/sb1250-mac.c
@@ -2039,9 +2039,9 @@ static irqreturn_t sbmac_intr(int irq,void *dev_instance)
2039 sbdma_tx_process(sc,&(sc->sbm_txdma), 0); 2039 sbdma_tx_process(sc,&(sc->sbm_txdma), 0);
2040 2040
2041 if (isr & (M_MAC_INT_CHANNEL << S_MAC_RX_CH0)) { 2041 if (isr & (M_MAC_INT_CHANNEL << S_MAC_RX_CH0)) {
2042 if (netif_rx_schedule_prep(&sc->napi)) { 2042 if (napi_schedule_prep(&sc->napi)) {
2043 __raw_writeq(0, sc->sbm_imr); 2043 __raw_writeq(0, sc->sbm_imr);
2044 __netif_rx_schedule(&sc->napi); 2044 __napi_schedule(&sc->napi);
2045 /* Depend on the exit from poll to reenable intr */ 2045 /* Depend on the exit from poll to reenable intr */
2046 } 2046 }
2047 else { 2047 else {
@@ -2667,7 +2667,7 @@ static int sbmac_poll(struct napi_struct *napi, int budget)
2667 sbdma_tx_process(sc, &(sc->sbm_txdma), 1); 2667 sbdma_tx_process(sc, &(sc->sbm_txdma), 1);
2668 2668
2669 if (work_done < budget) { 2669 if (work_done < budget) {
2670 netif_rx_complete(napi); 2670 napi_complete(napi);
2671 2671
2672#ifdef CONFIG_SBMAC_COALESCE 2672#ifdef CONFIG_SBMAC_COALESCE
2673 __raw_writeq(((M_MAC_INT_EOP_COUNT | M_MAC_INT_EOP_TIMER) << S_MAC_TX_CH0) | 2673 __raw_writeq(((M_MAC_INT_EOP_COUNT | M_MAC_INT_EOP_TIMER) << S_MAC_TX_CH0) |
diff --git a/drivers/net/sc92031.c b/drivers/net/sc92031.c
index 8b75bef4a841..c13cbf099b88 100644
--- a/drivers/net/sc92031.c
+++ b/drivers/net/sc92031.c
@@ -13,6 +13,9 @@
13 * Both are almost identical and seem to be based on pci-skeleton.c 13 * Both are almost identical and seem to be based on pci-skeleton.c
14 * 14 *
15 * Rewritten for 2.6 by Cesar Eduardo Barros 15 * Rewritten for 2.6 by Cesar Eduardo Barros
16 *
17 * A datasheet for this chip can be found at
18 * http://www.silan.com.cn/english/products/pdf/SC92031AY.pdf
16 */ 19 */
17 20
18/* Note about set_mac_address: I don't know how to change the hardware 21/* Note about set_mac_address: I don't know how to change the hardware
@@ -31,13 +34,7 @@
31 34
32#include <asm/irq.h> 35#include <asm/irq.h>
33 36
34#define PCI_VENDOR_ID_SILAN 0x1904
35#define PCI_DEVICE_ID_SILAN_SC92031 0x2031
36#define PCI_DEVICE_ID_SILAN_8139D 0x8139
37
38#define SC92031_NAME "sc92031" 37#define SC92031_NAME "sc92031"
39#define SC92031_DESCRIPTION "Silan SC92031 PCI Fast Ethernet Adapter driver"
40#define SC92031_VERSION "2.0c"
41 38
42/* BAR 0 is MMIO, BAR 1 is PIO */ 39/* BAR 0 is MMIO, BAR 1 is PIO */
43#ifndef SC92031_USE_BAR 40#ifndef SC92031_USE_BAR
@@ -1264,7 +1261,6 @@ static void sc92031_ethtool_get_drvinfo(struct net_device *dev,
1264 struct pci_dev *pdev = priv->pdev; 1261 struct pci_dev *pdev = priv->pdev;
1265 1262
1266 strcpy(drvinfo->driver, SC92031_NAME); 1263 strcpy(drvinfo->driver, SC92031_NAME);
1267 strcpy(drvinfo->version, SC92031_VERSION);
1268 strcpy(drvinfo->bus_info, pci_name(pdev)); 1264 strcpy(drvinfo->bus_info, pci_name(pdev));
1269} 1265}
1270 1266
@@ -1423,6 +1419,7 @@ static int __devinit sc92031_probe(struct pci_dev *pdev,
1423 struct net_device *dev; 1419 struct net_device *dev;
1424 struct sc92031_priv *priv; 1420 struct sc92031_priv *priv;
1425 u32 mac0, mac1; 1421 u32 mac0, mac1;
1422 unsigned long base_addr;
1426 1423
1427 err = pci_enable_device(pdev); 1424 err = pci_enable_device(pdev);
1428 if (unlikely(err < 0)) 1425 if (unlikely(err < 0))
@@ -1497,6 +1494,14 @@ static int __devinit sc92031_probe(struct pci_dev *pdev,
1497 if (err < 0) 1494 if (err < 0)
1498 goto out_register_netdev; 1495 goto out_register_netdev;
1499 1496
1497#if SC92031_USE_BAR == 0
1498 base_addr = dev->mem_start;
1499#elif SC92031_USE_BAR == 1
1500 base_addr = dev->base_addr;
1501#endif
1502 printk(KERN_INFO "%s: SC92031 at 0x%lx, %pM, IRQ %d\n", dev->name,
1503 base_addr, dev->dev_addr, dev->irq);
1504
1500 return 0; 1505 return 0;
1501 1506
1502out_register_netdev: 1507out_register_netdev:
@@ -1586,8 +1591,8 @@ out:
1586} 1591}
1587 1592
1588static struct pci_device_id sc92031_pci_device_id_table[] __devinitdata = { 1593static struct pci_device_id sc92031_pci_device_id_table[] __devinitdata = {
1589 { PCI_DEVICE(PCI_VENDOR_ID_SILAN, PCI_DEVICE_ID_SILAN_SC92031) }, 1594 { PCI_DEVICE(PCI_VENDOR_ID_SILAN, 0x2031) },
1590 { PCI_DEVICE(PCI_VENDOR_ID_SILAN, PCI_DEVICE_ID_SILAN_8139D) }, 1595 { PCI_DEVICE(PCI_VENDOR_ID_SILAN, 0x8139) },
1591 { 0, } 1596 { 0, }
1592}; 1597};
1593MODULE_DEVICE_TABLE(pci, sc92031_pci_device_id_table); 1598MODULE_DEVICE_TABLE(pci, sc92031_pci_device_id_table);
@@ -1603,7 +1608,6 @@ static struct pci_driver sc92031_pci_driver = {
1603 1608
1604static int __init sc92031_init(void) 1609static int __init sc92031_init(void)
1605{ 1610{
1606 printk(KERN_INFO SC92031_DESCRIPTION " " SC92031_VERSION "\n");
1607 return pci_register_driver(&sc92031_pci_driver); 1611 return pci_register_driver(&sc92031_pci_driver);
1608} 1612}
1609 1613
@@ -1617,5 +1621,4 @@ module_exit(sc92031_exit);
1617 1621
1618MODULE_LICENSE("GPL"); 1622MODULE_LICENSE("GPL");
1619MODULE_AUTHOR("Cesar Eduardo Barros <cesarb@cesarb.net>"); 1623MODULE_AUTHOR("Cesar Eduardo Barros <cesarb@cesarb.net>");
1620MODULE_DESCRIPTION(SC92031_DESCRIPTION); 1624MODULE_DESCRIPTION("Silan SC92031 PCI Fast Ethernet Adapter driver");
1621MODULE_VERSION(SC92031_VERSION);
diff --git a/drivers/net/sfc/Kconfig b/drivers/net/sfc/Kconfig
index c535408ad6be..12a82966b577 100644
--- a/drivers/net/sfc/Kconfig
+++ b/drivers/net/sfc/Kconfig
@@ -2,7 +2,6 @@ config SFC
2 tristate "Solarflare Solarstorm SFC4000 support" 2 tristate "Solarflare Solarstorm SFC4000 support"
3 depends on PCI && INET 3 depends on PCI && INET
4 select MII 4 select MII
5 select INET_LRO
6 select CRC32 5 select CRC32
7 select I2C 6 select I2C
8 select I2C_ALGOBIT 7 select I2C_ALGOBIT
diff --git a/drivers/net/sfc/efx.c b/drivers/net/sfc/efx.c
index 7673fd92eaf5..3ee2a4548cba 100644
--- a/drivers/net/sfc/efx.c
+++ b/drivers/net/sfc/efx.c
@@ -182,7 +182,6 @@ static int efx_process_channel(struct efx_channel *channel, int rx_quota)
182 channel->rx_pkt = NULL; 182 channel->rx_pkt = NULL;
183 } 183 }
184 184
185 efx_flush_lro(channel);
186 efx_rx_strategy(channel); 185 efx_rx_strategy(channel);
187 186
188 efx_fast_push_rx_descriptors(&efx->rx_queue[channel->channel]); 187 efx_fast_push_rx_descriptors(&efx->rx_queue[channel->channel]);
@@ -225,11 +224,11 @@ static int efx_poll(struct napi_struct *napi, int budget)
225 224
226 if (rx_packets < budget) { 225 if (rx_packets < budget) {
227 /* There is no race here; although napi_disable() will 226 /* There is no race here; although napi_disable() will
228 * only wait for netif_rx_complete(), this isn't a problem 227 * only wait for napi_complete(), this isn't a problem
229 * since efx_channel_processed() will have no effect if 228 * since efx_channel_processed() will have no effect if
230 * interrupts have already been disabled. 229 * interrupts have already been disabled.
231 */ 230 */
232 netif_rx_complete(napi); 231 napi_complete(napi);
233 efx_channel_processed(channel); 232 efx_channel_processed(channel);
234 } 233 }
235 234
@@ -1269,18 +1268,11 @@ static int efx_ioctl(struct net_device *net_dev, struct ifreq *ifr, int cmd)
1269static int efx_init_napi(struct efx_nic *efx) 1268static int efx_init_napi(struct efx_nic *efx)
1270{ 1269{
1271 struct efx_channel *channel; 1270 struct efx_channel *channel;
1272 int rc;
1273 1271
1274 efx_for_each_channel(channel, efx) { 1272 efx_for_each_channel(channel, efx) {
1275 channel->napi_dev = efx->net_dev; 1273 channel->napi_dev = efx->net_dev;
1276 rc = efx_lro_init(&channel->lro_mgr, efx);
1277 if (rc)
1278 goto err;
1279 } 1274 }
1280 return 0; 1275 return 0;
1281 err:
1282 efx_fini_napi(efx);
1283 return rc;
1284} 1276}
1285 1277
1286static void efx_fini_napi(struct efx_nic *efx) 1278static void efx_fini_napi(struct efx_nic *efx)
@@ -1288,7 +1280,6 @@ static void efx_fini_napi(struct efx_nic *efx)
1288 struct efx_channel *channel; 1280 struct efx_channel *channel;
1289 1281
1290 efx_for_each_channel(channel, efx) { 1282 efx_for_each_channel(channel, efx) {
1291 efx_lro_fini(&channel->lro_mgr);
1292 channel->napi_dev = NULL; 1283 channel->napi_dev = NULL;
1293 } 1284 }
1294} 1285}
@@ -2097,7 +2088,7 @@ static int __devinit efx_pci_probe(struct pci_dev *pci_dev,
2097 net_dev->features |= (NETIF_F_IP_CSUM | NETIF_F_SG | 2088 net_dev->features |= (NETIF_F_IP_CSUM | NETIF_F_SG |
2098 NETIF_F_HIGHDMA | NETIF_F_TSO); 2089 NETIF_F_HIGHDMA | NETIF_F_TSO);
2099 if (lro) 2090 if (lro)
2100 net_dev->features |= NETIF_F_LRO; 2091 net_dev->features |= NETIF_F_GRO;
2101 /* Mask for features that also apply to VLAN devices */ 2092 /* Mask for features that also apply to VLAN devices */
2102 net_dev->vlan_features |= (NETIF_F_ALL_CSUM | NETIF_F_SG | 2093 net_dev->vlan_features |= (NETIF_F_ALL_CSUM | NETIF_F_SG |
2103 NETIF_F_HIGHDMA | NETIF_F_TSO); 2094 NETIF_F_HIGHDMA | NETIF_F_TSO);
diff --git a/drivers/net/sfc/efx.h b/drivers/net/sfc/efx.h
index 0dd7a532c78a..fb1ac0e63c0b 100644
--- a/drivers/net/sfc/efx.h
+++ b/drivers/net/sfc/efx.h
@@ -77,7 +77,7 @@ static inline void efx_schedule_channel(struct efx_channel *channel)
77 channel->channel, raw_smp_processor_id()); 77 channel->channel, raw_smp_processor_id());
78 channel->work_pending = true; 78 channel->work_pending = true;
79 79
80 netif_rx_schedule(&channel->napi_str); 80 napi_schedule(&channel->napi_str);
81} 81}
82 82
83#endif /* EFX_EFX_H */ 83#endif /* EFX_EFX_H */
diff --git a/drivers/net/sfc/net_driver.h b/drivers/net/sfc/net_driver.h
index 5f255f75754e..8643505788cc 100644
--- a/drivers/net/sfc/net_driver.h
+++ b/drivers/net/sfc/net_driver.h
@@ -25,15 +25,11 @@
25#include <linux/device.h> 25#include <linux/device.h>
26#include <linux/highmem.h> 26#include <linux/highmem.h>
27#include <linux/workqueue.h> 27#include <linux/workqueue.h>
28#include <linux/inet_lro.h>
29#include <linux/i2c.h> 28#include <linux/i2c.h>
30 29
31#include "enum.h" 30#include "enum.h"
32#include "bitfield.h" 31#include "bitfield.h"
33 32
34#define EFX_MAX_LRO_DESCRIPTORS 8
35#define EFX_MAX_LRO_AGGR MAX_SKB_FRAGS
36
37/************************************************************************** 33/**************************************************************************
38 * 34 *
39 * Build definitions 35 * Build definitions
@@ -340,13 +336,10 @@ enum efx_rx_alloc_method {
340 * @eventq_read_ptr: Event queue read pointer 336 * @eventq_read_ptr: Event queue read pointer
341 * @last_eventq_read_ptr: Last event queue read pointer value. 337 * @last_eventq_read_ptr: Last event queue read pointer value.
342 * @eventq_magic: Event queue magic value for driver-generated test events 338 * @eventq_magic: Event queue magic value for driver-generated test events
343 * @lro_mgr: LRO state
344 * @rx_alloc_level: Watermark based heuristic counter for pushing descriptors 339 * @rx_alloc_level: Watermark based heuristic counter for pushing descriptors
345 * and diagnostic counters 340 * and diagnostic counters
346 * @rx_alloc_push_pages: RX allocation method currently in use for pushing 341 * @rx_alloc_push_pages: RX allocation method currently in use for pushing
347 * descriptors 342 * descriptors
348 * @rx_alloc_pop_pages: RX allocation method currently in use for popping
349 * descriptors
350 * @n_rx_tobe_disc: Count of RX_TOBE_DISC errors 343 * @n_rx_tobe_disc: Count of RX_TOBE_DISC errors
351 * @n_rx_ip_frag_err: Count of RX IP fragment errors 344 * @n_rx_ip_frag_err: Count of RX IP fragment errors
352 * @n_rx_ip_hdr_chksum_err: Count of RX IP header checksum errors 345 * @n_rx_ip_hdr_chksum_err: Count of RX IP header checksum errors
@@ -371,10 +364,8 @@ struct efx_channel {
371 unsigned int last_eventq_read_ptr; 364 unsigned int last_eventq_read_ptr;
372 unsigned int eventq_magic; 365 unsigned int eventq_magic;
373 366
374 struct net_lro_mgr lro_mgr;
375 int rx_alloc_level; 367 int rx_alloc_level;
376 int rx_alloc_push_pages; 368 int rx_alloc_push_pages;
377 int rx_alloc_pop_pages;
378 369
379 unsigned n_rx_tobe_disc; 370 unsigned n_rx_tobe_disc;
380 unsigned n_rx_ip_frag_err; 371 unsigned n_rx_ip_frag_err;
diff --git a/drivers/net/sfc/rx.c b/drivers/net/sfc/rx.c
index b8ba4bbad889..a0345b380979 100644
--- a/drivers/net/sfc/rx.c
+++ b/drivers/net/sfc/rx.c
@@ -99,109 +99,6 @@ static inline unsigned int efx_rx_buf_size(struct efx_nic *efx)
99} 99}
100 100
101 101
102/**************************************************************************
103 *
104 * Linux generic LRO handling
105 *
106 **************************************************************************
107 */
108
109static int efx_lro_get_skb_hdr(struct sk_buff *skb, void **ip_hdr,
110 void **tcpudp_hdr, u64 *hdr_flags, void *priv)
111{
112 struct efx_channel *channel = priv;
113 struct iphdr *iph;
114 struct tcphdr *th;
115
116 iph = (struct iphdr *)skb->data;
117 if (skb->protocol != htons(ETH_P_IP) || iph->protocol != IPPROTO_TCP)
118 goto fail;
119
120 th = (struct tcphdr *)(skb->data + iph->ihl * 4);
121
122 *tcpudp_hdr = th;
123 *ip_hdr = iph;
124 *hdr_flags = LRO_IPV4 | LRO_TCP;
125
126 channel->rx_alloc_level += RX_ALLOC_FACTOR_LRO;
127 return 0;
128fail:
129 channel->rx_alloc_level += RX_ALLOC_FACTOR_SKB;
130 return -1;
131}
132
133static int efx_get_frag_hdr(struct skb_frag_struct *frag, void **mac_hdr,
134 void **ip_hdr, void **tcpudp_hdr, u64 *hdr_flags,
135 void *priv)
136{
137 struct efx_channel *channel = priv;
138 struct ethhdr *eh;
139 struct iphdr *iph;
140
141 /* We support EtherII and VLAN encapsulated IPv4 */
142 eh = page_address(frag->page) + frag->page_offset;
143 *mac_hdr = eh;
144
145 if (eh->h_proto == htons(ETH_P_IP)) {
146 iph = (struct iphdr *)(eh + 1);
147 } else {
148 struct vlan_ethhdr *veh = (struct vlan_ethhdr *)eh;
149 if (veh->h_vlan_encapsulated_proto != htons(ETH_P_IP))
150 goto fail;
151
152 iph = (struct iphdr *)(veh + 1);
153 }
154 *ip_hdr = iph;
155
156 /* We can only do LRO over TCP */
157 if (iph->protocol != IPPROTO_TCP)
158 goto fail;
159
160 *hdr_flags = LRO_IPV4 | LRO_TCP;
161 *tcpudp_hdr = (struct tcphdr *)((u8 *) iph + iph->ihl * 4);
162
163 channel->rx_alloc_level += RX_ALLOC_FACTOR_LRO;
164 return 0;
165 fail:
166 channel->rx_alloc_level += RX_ALLOC_FACTOR_SKB;
167 return -1;
168}
169
170int efx_lro_init(struct net_lro_mgr *lro_mgr, struct efx_nic *efx)
171{
172 size_t s = sizeof(struct net_lro_desc) * EFX_MAX_LRO_DESCRIPTORS;
173 struct net_lro_desc *lro_arr;
174
175 /* Allocate the LRO descriptors structure */
176 lro_arr = kzalloc(s, GFP_KERNEL);
177 if (lro_arr == NULL)
178 return -ENOMEM;
179
180 lro_mgr->lro_arr = lro_arr;
181 lro_mgr->max_desc = EFX_MAX_LRO_DESCRIPTORS;
182 lro_mgr->max_aggr = EFX_MAX_LRO_AGGR;
183 lro_mgr->frag_align_pad = EFX_PAGE_SKB_ALIGN;
184
185 lro_mgr->get_skb_header = efx_lro_get_skb_hdr;
186 lro_mgr->get_frag_header = efx_get_frag_hdr;
187 lro_mgr->dev = efx->net_dev;
188
189 lro_mgr->features = LRO_F_NAPI;
190
191 /* We can pass packets up with the checksum intact */
192 lro_mgr->ip_summed = CHECKSUM_UNNECESSARY;
193
194 lro_mgr->ip_summed_aggr = CHECKSUM_UNNECESSARY;
195
196 return 0;
197}
198
199void efx_lro_fini(struct net_lro_mgr *lro_mgr)
200{
201 kfree(lro_mgr->lro_arr);
202 lro_mgr->lro_arr = NULL;
203}
204
205/** 102/**
206 * efx_init_rx_buffer_skb - create new RX buffer using skb-based allocation 103 * efx_init_rx_buffer_skb - create new RX buffer using skb-based allocation
207 * 104 *
@@ -549,77 +446,31 @@ static void efx_rx_packet__check_len(struct efx_rx_queue *rx_queue,
549static void efx_rx_packet_lro(struct efx_channel *channel, 446static void efx_rx_packet_lro(struct efx_channel *channel,
550 struct efx_rx_buffer *rx_buf) 447 struct efx_rx_buffer *rx_buf)
551{ 448{
552 struct net_lro_mgr *lro_mgr = &channel->lro_mgr; 449 struct napi_struct *napi = &channel->napi_str;
553 void *priv = channel;
554 450
555 /* Pass the skb/page into the LRO engine */ 451 /* Pass the skb/page into the LRO engine */
556 if (rx_buf->page) { 452 if (rx_buf->page) {
557 struct skb_frag_struct frags; 453 struct napi_gro_fraginfo info;
558 454
559 frags.page = rx_buf->page; 455 info.frags[0].page = rx_buf->page;
560 frags.page_offset = efx_rx_buf_offset(rx_buf); 456 info.frags[0].page_offset = efx_rx_buf_offset(rx_buf);
561 frags.size = rx_buf->len; 457 info.frags[0].size = rx_buf->len;
458 info.nr_frags = 1;
459 info.ip_summed = CHECKSUM_UNNECESSARY;
460 info.len = rx_buf->len;
562 461
563 lro_receive_frags(lro_mgr, &frags, rx_buf->len, 462 napi_gro_frags(napi, &info);
564 rx_buf->len, priv, 0);
565 463
566 EFX_BUG_ON_PARANOID(rx_buf->skb); 464 EFX_BUG_ON_PARANOID(rx_buf->skb);
567 rx_buf->page = NULL; 465 rx_buf->page = NULL;
568 } else { 466 } else {
569 EFX_BUG_ON_PARANOID(!rx_buf->skb); 467 EFX_BUG_ON_PARANOID(!rx_buf->skb);
570 468
571 lro_receive_skb(lro_mgr, rx_buf->skb, priv); 469 napi_gro_receive(napi, rx_buf->skb);
572 rx_buf->skb = NULL; 470 rx_buf->skb = NULL;
573 } 471 }
574} 472}
575 473
576/* Allocate and construct an SKB around a struct page.*/
577static struct sk_buff *efx_rx_mk_skb(struct efx_rx_buffer *rx_buf,
578 struct efx_nic *efx,
579 int hdr_len)
580{
581 struct sk_buff *skb;
582
583 /* Allocate an SKB to store the headers */
584 skb = netdev_alloc_skb(efx->net_dev, hdr_len + EFX_PAGE_SKB_ALIGN);
585 if (unlikely(skb == NULL)) {
586 EFX_ERR_RL(efx, "RX out of memory for skb\n");
587 return NULL;
588 }
589
590 EFX_BUG_ON_PARANOID(skb_shinfo(skb)->nr_frags);
591 EFX_BUG_ON_PARANOID(rx_buf->len < hdr_len);
592
593 skb->ip_summed = CHECKSUM_UNNECESSARY;
594 skb_reserve(skb, EFX_PAGE_SKB_ALIGN);
595
596 skb->len = rx_buf->len;
597 skb->truesize = rx_buf->len + sizeof(struct sk_buff);
598 memcpy(skb->data, rx_buf->data, hdr_len);
599 skb->tail += hdr_len;
600
601 /* Append the remaining page onto the frag list */
602 if (unlikely(rx_buf->len > hdr_len)) {
603 struct skb_frag_struct *frag = skb_shinfo(skb)->frags;
604 frag->page = rx_buf->page;
605 frag->page_offset = efx_rx_buf_offset(rx_buf) + hdr_len;
606 frag->size = skb->len - hdr_len;
607 skb_shinfo(skb)->nr_frags = 1;
608 skb->data_len = frag->size;
609 } else {
610 __free_pages(rx_buf->page, efx->rx_buffer_order);
611 skb->data_len = 0;
612 }
613
614 /* Ownership has transferred from the rx_buf to skb */
615 rx_buf->page = NULL;
616
617 /* Move past the ethernet header */
618 skb->protocol = eth_type_trans(skb, efx->net_dev);
619
620 return skb;
621}
622
623void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index, 474void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
624 unsigned int len, bool checksummed, bool discard) 475 unsigned int len, bool checksummed, bool discard)
625{ 476{
@@ -687,7 +538,6 @@ void __efx_rx_packet(struct efx_channel *channel,
687{ 538{
688 struct efx_nic *efx = channel->efx; 539 struct efx_nic *efx = channel->efx;
689 struct sk_buff *skb; 540 struct sk_buff *skb;
690 bool lro = !!(efx->net_dev->features & NETIF_F_LRO);
691 541
692 /* If we're in loopback test, then pass the packet directly to the 542 /* If we're in loopback test, then pass the packet directly to the
693 * loopback layer, and free the rx_buf here 543 * loopback layer, and free the rx_buf here
@@ -709,41 +559,21 @@ void __efx_rx_packet(struct efx_channel *channel,
709 efx->net_dev); 559 efx->net_dev);
710 } 560 }
711 561
712 /* Both our generic-LRO and SFC-SSR support skb and page based 562 if (likely(checksummed || rx_buf->page)) {
713 * allocation, but neither support switching from one to the
714 * other on the fly. If we spot that the allocation mode has
715 * changed, then flush the LRO state.
716 */
717 if (unlikely(channel->rx_alloc_pop_pages != (rx_buf->page != NULL))) {
718 efx_flush_lro(channel);
719 channel->rx_alloc_pop_pages = (rx_buf->page != NULL);
720 }
721 if (likely(checksummed && lro)) {
722 efx_rx_packet_lro(channel, rx_buf); 563 efx_rx_packet_lro(channel, rx_buf);
723 goto done; 564 goto done;
724 } 565 }
725 566
726 /* Form an skb if required */ 567 /* We now own the SKB */
727 if (rx_buf->page) { 568 skb = rx_buf->skb;
728 int hdr_len = min(rx_buf->len, EFX_SKB_HEADERS); 569 rx_buf->skb = NULL;
729 skb = efx_rx_mk_skb(rx_buf, efx, hdr_len);
730 if (unlikely(skb == NULL)) {
731 efx_free_rx_buffer(efx, rx_buf);
732 goto done;
733 }
734 } else {
735 /* We now own the SKB */
736 skb = rx_buf->skb;
737 rx_buf->skb = NULL;
738 }
739 570
740 EFX_BUG_ON_PARANOID(rx_buf->page); 571 EFX_BUG_ON_PARANOID(rx_buf->page);
741 EFX_BUG_ON_PARANOID(rx_buf->skb); 572 EFX_BUG_ON_PARANOID(rx_buf->skb);
742 EFX_BUG_ON_PARANOID(!skb); 573 EFX_BUG_ON_PARANOID(!skb);
743 574
744 /* Set the SKB flags */ 575 /* Set the SKB flags */
745 if (unlikely(!checksummed || !efx->rx_checksum_enabled)) 576 skb->ip_summed = CHECKSUM_NONE;
746 skb->ip_summed = CHECKSUM_NONE;
747 577
748 /* Pass the packet up */ 578 /* Pass the packet up */
749 netif_receive_skb(skb); 579 netif_receive_skb(skb);
@@ -760,7 +590,7 @@ void efx_rx_strategy(struct efx_channel *channel)
760 enum efx_rx_alloc_method method = rx_alloc_method; 590 enum efx_rx_alloc_method method = rx_alloc_method;
761 591
762 /* Only makes sense to use page based allocation if LRO is enabled */ 592 /* Only makes sense to use page based allocation if LRO is enabled */
763 if (!(channel->efx->net_dev->features & NETIF_F_LRO)) { 593 if (!(channel->efx->net_dev->features & NETIF_F_GRO)) {
764 method = RX_ALLOC_METHOD_SKB; 594 method = RX_ALLOC_METHOD_SKB;
765 } else if (method == RX_ALLOC_METHOD_AUTO) { 595 } else if (method == RX_ALLOC_METHOD_AUTO) {
766 /* Constrain the rx_alloc_level */ 596 /* Constrain the rx_alloc_level */
@@ -865,11 +695,6 @@ void efx_remove_rx_queue(struct efx_rx_queue *rx_queue)
865 rx_queue->buffer = NULL; 695 rx_queue->buffer = NULL;
866} 696}
867 697
868void efx_flush_lro(struct efx_channel *channel)
869{
870 lro_flush_all(&channel->lro_mgr);
871}
872
873 698
874module_param(rx_alloc_method, int, 0644); 699module_param(rx_alloc_method, int, 0644);
875MODULE_PARM_DESC(rx_alloc_method, "Allocation method used for RX buffers"); 700MODULE_PARM_DESC(rx_alloc_method, "Allocation method used for RX buffers");
diff --git a/drivers/net/sfc/rx.h b/drivers/net/sfc/rx.h
index 0e88a9ddc1c6..42ee7555a80b 100644
--- a/drivers/net/sfc/rx.h
+++ b/drivers/net/sfc/rx.h
@@ -17,9 +17,6 @@ void efx_remove_rx_queue(struct efx_rx_queue *rx_queue);
17void efx_init_rx_queue(struct efx_rx_queue *rx_queue); 17void efx_init_rx_queue(struct efx_rx_queue *rx_queue);
18void efx_fini_rx_queue(struct efx_rx_queue *rx_queue); 18void efx_fini_rx_queue(struct efx_rx_queue *rx_queue);
19 19
20int efx_lro_init(struct net_lro_mgr *lro_mgr, struct efx_nic *efx);
21void efx_lro_fini(struct net_lro_mgr *lro_mgr);
22void efx_flush_lro(struct efx_channel *channel);
23void efx_rx_strategy(struct efx_channel *channel); 20void efx_rx_strategy(struct efx_channel *channel);
24void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue); 21void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue);
25void efx_rx_work(struct work_struct *data); 22void efx_rx_work(struct work_struct *data);
diff --git a/drivers/net/sfc/sfe4001.c b/drivers/net/sfc/sfe4001.c
index 16b80acb9992..d21d014bf0c1 100644
--- a/drivers/net/sfc/sfe4001.c
+++ b/drivers/net/sfc/sfe4001.c
@@ -24,6 +24,7 @@
24 */ 24 */
25 25
26#include <linux/delay.h> 26#include <linux/delay.h>
27#include <linux/rtnetlink.h>
27#include "net_driver.h" 28#include "net_driver.h"
28#include "efx.h" 29#include "efx.h"
29#include "phy.h" 30#include "phy.h"
diff --git a/drivers/net/sfc/tenxpress.c b/drivers/net/sfc/tenxpress.c
index 9ecb77da9545..f1365097b4fd 100644
--- a/drivers/net/sfc/tenxpress.c
+++ b/drivers/net/sfc/tenxpress.c
@@ -8,6 +8,7 @@
8 */ 8 */
9 9
10#include <linux/delay.h> 10#include <linux/delay.h>
11#include <linux/rtnetlink.h>
11#include <linux/seq_file.h> 12#include <linux/seq_file.h>
12#include "efx.h" 13#include "efx.h"
13#include "mdio_10g.h" 14#include "mdio_10g.h"
diff --git a/drivers/net/skge.c b/drivers/net/skge.c
index c9dbb06f8c94..952d37ffee51 100644
--- a/drivers/net/skge.c
+++ b/drivers/net/skge.c
@@ -3214,7 +3214,7 @@ static int skge_poll(struct napi_struct *napi, int to_do)
3214 unsigned long flags; 3214 unsigned long flags;
3215 3215
3216 spin_lock_irqsave(&hw->hw_lock, flags); 3216 spin_lock_irqsave(&hw->hw_lock, flags);
3217 __netif_rx_complete(napi); 3217 __napi_complete(napi);
3218 hw->intr_mask |= napimask[skge->port]; 3218 hw->intr_mask |= napimask[skge->port];
3219 skge_write32(hw, B0_IMSK, hw->intr_mask); 3219 skge_write32(hw, B0_IMSK, hw->intr_mask);
3220 skge_read32(hw, B0_IMSK); 3220 skge_read32(hw, B0_IMSK);
@@ -3377,7 +3377,7 @@ static irqreturn_t skge_intr(int irq, void *dev_id)
3377 if (status & (IS_XA1_F|IS_R1_F)) { 3377 if (status & (IS_XA1_F|IS_R1_F)) {
3378 struct skge_port *skge = netdev_priv(hw->dev[0]); 3378 struct skge_port *skge = netdev_priv(hw->dev[0]);
3379 hw->intr_mask &= ~(IS_XA1_F|IS_R1_F); 3379 hw->intr_mask &= ~(IS_XA1_F|IS_R1_F);
3380 netif_rx_schedule(&skge->napi); 3380 napi_schedule(&skge->napi);
3381 } 3381 }
3382 3382
3383 if (status & IS_PA_TO_TX1) 3383 if (status & IS_PA_TO_TX1)
@@ -3397,7 +3397,7 @@ static irqreturn_t skge_intr(int irq, void *dev_id)
3397 3397
3398 if (status & (IS_XA2_F|IS_R2_F)) { 3398 if (status & (IS_XA2_F|IS_R2_F)) {
3399 hw->intr_mask &= ~(IS_XA2_F|IS_R2_F); 3399 hw->intr_mask &= ~(IS_XA2_F|IS_R2_F);
3400 netif_rx_schedule(&skge->napi); 3400 napi_schedule(&skge->napi);
3401 } 3401 }
3402 3402
3403 if (status & IS_PA_TO_RX2) { 3403 if (status & IS_PA_TO_RX2) {
diff --git a/drivers/net/smc91x.c b/drivers/net/smc91x.c
index b215a8d85e62..508e8da2f65f 100644
--- a/drivers/net/smc91x.c
+++ b/drivers/net/smc91x.c
@@ -1643,6 +1643,117 @@ static void smc_ethtool_setmsglevel(struct net_device *dev, u32 level)
1643 lp->msg_enable = level; 1643 lp->msg_enable = level;
1644} 1644}
1645 1645
1646static int smc_write_eeprom_word(struct net_device *dev, u16 addr, u16 word)
1647{
1648 u16 ctl;
1649 struct smc_local *lp = netdev_priv(dev);
1650 void __iomem *ioaddr = lp->base;
1651
1652 spin_lock_irq(&lp->lock);
1653 /* load word into GP register */
1654 SMC_SELECT_BANK(lp, 1);
1655 SMC_SET_GP(lp, word);
1656 /* set the address to put the data in EEPROM */
1657 SMC_SELECT_BANK(lp, 2);
1658 SMC_SET_PTR(lp, addr);
1659 /* tell it to write */
1660 SMC_SELECT_BANK(lp, 1);
1661 ctl = SMC_GET_CTL(lp);
1662 SMC_SET_CTL(lp, ctl | (CTL_EEPROM_SELECT | CTL_STORE));
1663 /* wait for it to finish */
1664 do {
1665 udelay(1);
1666 } while (SMC_GET_CTL(lp) & CTL_STORE);
1667 /* clean up */
1668 SMC_SET_CTL(lp, ctl);
1669 SMC_SELECT_BANK(lp, 2);
1670 spin_unlock_irq(&lp->lock);
1671 return 0;
1672}
1673
1674static int smc_read_eeprom_word(struct net_device *dev, u16 addr, u16 *word)
1675{
1676 u16 ctl;
1677 struct smc_local *lp = netdev_priv(dev);
1678 void __iomem *ioaddr = lp->base;
1679
1680 spin_lock_irq(&lp->lock);
1681 /* set the EEPROM address to get the data from */
1682 SMC_SELECT_BANK(lp, 2);
1683 SMC_SET_PTR(lp, addr | PTR_READ);
1684 /* tell it to load */
1685 SMC_SELECT_BANK(lp, 1);
1686 SMC_SET_GP(lp, 0xffff); /* init to known */
1687 ctl = SMC_GET_CTL(lp);
1688 SMC_SET_CTL(lp, ctl | (CTL_EEPROM_SELECT | CTL_RELOAD));
1689 /* wait for it to finish */
1690 do {
1691 udelay(1);
1692 } while (SMC_GET_CTL(lp) & CTL_RELOAD);
1693 /* read word from GP register */
1694 *word = SMC_GET_GP(lp);
1695 /* clean up */
1696 SMC_SET_CTL(lp, ctl);
1697 SMC_SELECT_BANK(lp, 2);
1698 spin_unlock_irq(&lp->lock);
1699 return 0;
1700}
1701
1702static int smc_ethtool_geteeprom_len(struct net_device *dev)
1703{
1704 return 0x23 * 2;
1705}
1706
1707static int smc_ethtool_geteeprom(struct net_device *dev,
1708 struct ethtool_eeprom *eeprom, u8 *data)
1709{
1710 int i;
1711 int imax;
1712
1713 DBG(1, "Reading %d bytes at %d(0x%x)\n",
1714 eeprom->len, eeprom->offset, eeprom->offset);
1715 imax = smc_ethtool_geteeprom_len(dev);
1716 for (i = 0; i < eeprom->len; i += 2) {
1717 int ret;
1718 u16 wbuf;
1719 int offset = i + eeprom->offset;
1720 if (offset > imax)
1721 break;
1722 ret = smc_read_eeprom_word(dev, offset >> 1, &wbuf);
1723 if (ret != 0)
1724 return ret;
1725 DBG(2, "Read 0x%x from 0x%x\n", wbuf, offset >> 1);
1726 data[i] = (wbuf >> 8) & 0xff;
1727 data[i+1] = wbuf & 0xff;
1728 }
1729 return 0;
1730}
1731
1732static int smc_ethtool_seteeprom(struct net_device *dev,
1733 struct ethtool_eeprom *eeprom, u8 *data)
1734{
1735 int i;
1736 int imax;
1737
1738 DBG(1, "Writing %d bytes to %d(0x%x)\n",
1739 eeprom->len, eeprom->offset, eeprom->offset);
1740 imax = smc_ethtool_geteeprom_len(dev);
1741 for (i = 0; i < eeprom->len; i += 2) {
1742 int ret;
1743 u16 wbuf;
1744 int offset = i + eeprom->offset;
1745 if (offset > imax)
1746 break;
1747 wbuf = (data[i] << 8) | data[i + 1];
1748 DBG(2, "Writing 0x%x to 0x%x\n", wbuf, offset >> 1);
1749 ret = smc_write_eeprom_word(dev, offset >> 1, wbuf);
1750 if (ret != 0)
1751 return ret;
1752 }
1753 return 0;
1754}
1755
1756
1646static const struct ethtool_ops smc_ethtool_ops = { 1757static const struct ethtool_ops smc_ethtool_ops = {
1647 .get_settings = smc_ethtool_getsettings, 1758 .get_settings = smc_ethtool_getsettings,
1648 .set_settings = smc_ethtool_setsettings, 1759 .set_settings = smc_ethtool_setsettings,
@@ -1652,8 +1763,9 @@ static const struct ethtool_ops smc_ethtool_ops = {
1652 .set_msglevel = smc_ethtool_setmsglevel, 1763 .set_msglevel = smc_ethtool_setmsglevel,
1653 .nway_reset = smc_ethtool_nwayreset, 1764 .nway_reset = smc_ethtool_nwayreset,
1654 .get_link = ethtool_op_get_link, 1765 .get_link = ethtool_op_get_link,
1655// .get_eeprom = smc_ethtool_geteeprom, 1766 .get_eeprom_len = smc_ethtool_geteeprom_len,
1656// .set_eeprom = smc_ethtool_seteeprom, 1767 .get_eeprom = smc_ethtool_geteeprom,
1768 .set_eeprom = smc_ethtool_seteeprom,
1657}; 1769};
1658 1770
1659/* 1771/*
diff --git a/drivers/net/smc91x.h b/drivers/net/smc91x.h
index c4ccd121bc9c..ed9ae43523a1 100644
--- a/drivers/net/smc91x.h
+++ b/drivers/net/smc91x.h
@@ -1141,6 +1141,16 @@ static const char * chip_ids[ 16 ] = {
1141 1141
1142#define SMC_GET_MII(lp) SMC_inw(ioaddr, MII_REG(lp)) 1142#define SMC_GET_MII(lp) SMC_inw(ioaddr, MII_REG(lp))
1143 1143
1144#define SMC_GET_GP(lp) SMC_inw(ioaddr, GP_REG(lp))
1145
1146#define SMC_SET_GP(lp, x) \
1147 do { \
1148 if (SMC_MUST_ALIGN_WRITE(lp)) \
1149 SMC_outl((x)<<16, ioaddr, SMC_REG(lp, 8, 1)); \
1150 else \
1151 SMC_outw(x, ioaddr, GP_REG(lp)); \
1152 } while (0)
1153
1144#define SMC_SET_MII(lp, x) SMC_outw(x, ioaddr, MII_REG(lp)) 1154#define SMC_SET_MII(lp, x) SMC_outw(x, ioaddr, MII_REG(lp))
1145 1155
1146#define SMC_GET_MIR(lp) SMC_inw(ioaddr, MIR_REG(lp)) 1156#define SMC_GET_MIR(lp) SMC_inw(ioaddr, MIR_REG(lp))
diff --git a/drivers/net/smsc911x.c b/drivers/net/smsc911x.c
index f513bdf1c887..d271ae39c6f3 100644
--- a/drivers/net/smsc911x.c
+++ b/drivers/net/smsc911x.c
@@ -984,7 +984,7 @@ static int smsc911x_poll(struct napi_struct *napi, int budget)
984 /* We processed all packets available. Tell NAPI it can 984 /* We processed all packets available. Tell NAPI it can
985 * stop polling then re-enable rx interrupts */ 985 * stop polling then re-enable rx interrupts */
986 smsc911x_reg_write(pdata, INT_STS, INT_STS_RSFL_); 986 smsc911x_reg_write(pdata, INT_STS, INT_STS_RSFL_);
987 netif_rx_complete(napi); 987 napi_complete(napi);
988 temp = smsc911x_reg_read(pdata, INT_EN); 988 temp = smsc911x_reg_read(pdata, INT_EN);
989 temp |= INT_EN_RSFL_EN_; 989 temp |= INT_EN_RSFL_EN_;
990 smsc911x_reg_write(pdata, INT_EN, temp); 990 smsc911x_reg_write(pdata, INT_EN, temp);
@@ -1485,16 +1485,16 @@ static irqreturn_t smsc911x_irqhandler(int irq, void *dev_id)
1485 } 1485 }
1486 1486
1487 if (likely(intsts & inten & INT_STS_RSFL_)) { 1487 if (likely(intsts & inten & INT_STS_RSFL_)) {
1488 if (likely(netif_rx_schedule_prep(&pdata->napi))) { 1488 if (likely(napi_schedule_prep(&pdata->napi))) {
1489 /* Disable Rx interrupts */ 1489 /* Disable Rx interrupts */
1490 temp = smsc911x_reg_read(pdata, INT_EN); 1490 temp = smsc911x_reg_read(pdata, INT_EN);
1491 temp &= (~INT_EN_RSFL_EN_); 1491 temp &= (~INT_EN_RSFL_EN_);
1492 smsc911x_reg_write(pdata, INT_EN, temp); 1492 smsc911x_reg_write(pdata, INT_EN, temp);
1493 /* Schedule a NAPI poll */ 1493 /* Schedule a NAPI poll */
1494 __netif_rx_schedule(&pdata->napi); 1494 __napi_schedule(&pdata->napi);
1495 } else { 1495 } else {
1496 SMSC_WARNING(RX_ERR, 1496 SMSC_WARNING(RX_ERR,
1497 "netif_rx_schedule_prep failed"); 1497 "napi_schedule_prep failed");
1498 } 1498 }
1499 serviced = IRQ_HANDLED; 1499 serviced = IRQ_HANDLED;
1500 } 1500 }
diff --git a/drivers/net/smsc9420.c b/drivers/net/smsc9420.c
index c14a4c6452c7..79f4c228b030 100644
--- a/drivers/net/smsc9420.c
+++ b/drivers/net/smsc9420.c
@@ -666,7 +666,7 @@ static irqreturn_t smsc9420_isr(int irq, void *dev_id)
666 smsc9420_pci_flush_write(pd); 666 smsc9420_pci_flush_write(pd);
667 667
668 ints_to_clear |= (DMAC_STS_RX_ | DMAC_STS_NIS_); 668 ints_to_clear |= (DMAC_STS_RX_ | DMAC_STS_NIS_);
669 netif_rx_schedule(&pd->napi); 669 napi_schedule(&pd->napi);
670 } 670 }
671 671
672 if (ints_to_clear) 672 if (ints_to_clear)
@@ -889,7 +889,7 @@ static int smsc9420_rx_poll(struct napi_struct *napi, int budget)
889 smsc9420_pci_flush_write(pd); 889 smsc9420_pci_flush_write(pd);
890 890
891 if (work_done < budget) { 891 if (work_done < budget) {
892 netif_rx_complete(&pd->napi); 892 napi_complete(&pd->napi);
893 893
894 /* re-enable RX DMA interrupts */ 894 /* re-enable RX DMA interrupts */
895 dma_intr_ena = smsc9420_reg_read(pd, DMAC_INTR_ENA); 895 dma_intr_ena = smsc9420_reg_read(pd, DMAC_INTR_ENA);
diff --git a/drivers/net/spider_net.c b/drivers/net/spider_net.c
index 88d2c67788df..7f6b4a4052ee 100644
--- a/drivers/net/spider_net.c
+++ b/drivers/net/spider_net.c
@@ -1301,7 +1301,7 @@ static int spider_net_poll(struct napi_struct *napi, int budget)
1301 /* if all packets are in the stack, enable interrupts and return 0 */ 1301 /* if all packets are in the stack, enable interrupts and return 0 */
1302 /* if not, return 1 */ 1302 /* if not, return 1 */
1303 if (packets_done < budget) { 1303 if (packets_done < budget) {
1304 netif_rx_complete(napi); 1304 napi_complete(napi);
1305 spider_net_rx_irq_on(card); 1305 spider_net_rx_irq_on(card);
1306 card->ignore_rx_ramfull = 0; 1306 card->ignore_rx_ramfull = 0;
1307 } 1307 }
@@ -1528,7 +1528,7 @@ spider_net_handle_error_irq(struct spider_net_card *card, u32 status_reg,
1528 spider_net_refill_rx_chain(card); 1528 spider_net_refill_rx_chain(card);
1529 spider_net_enable_rxdmac(card); 1529 spider_net_enable_rxdmac(card);
1530 card->num_rx_ints ++; 1530 card->num_rx_ints ++;
1531 netif_rx_schedule(&card->napi); 1531 napi_schedule(&card->napi);
1532 } 1532 }
1533 show_error = 0; 1533 show_error = 0;
1534 break; 1534 break;
@@ -1548,7 +1548,7 @@ spider_net_handle_error_irq(struct spider_net_card *card, u32 status_reg,
1548 spider_net_refill_rx_chain(card); 1548 spider_net_refill_rx_chain(card);
1549 spider_net_enable_rxdmac(card); 1549 spider_net_enable_rxdmac(card);
1550 card->num_rx_ints ++; 1550 card->num_rx_ints ++;
1551 netif_rx_schedule(&card->napi); 1551 napi_schedule(&card->napi);
1552 show_error = 0; 1552 show_error = 0;
1553 break; 1553 break;
1554 1554
@@ -1562,7 +1562,7 @@ spider_net_handle_error_irq(struct spider_net_card *card, u32 status_reg,
1562 spider_net_refill_rx_chain(card); 1562 spider_net_refill_rx_chain(card);
1563 spider_net_enable_rxdmac(card); 1563 spider_net_enable_rxdmac(card);
1564 card->num_rx_ints ++; 1564 card->num_rx_ints ++;
1565 netif_rx_schedule(&card->napi); 1565 napi_schedule(&card->napi);
1566 show_error = 0; 1566 show_error = 0;
1567 break; 1567 break;
1568 1568
@@ -1656,11 +1656,11 @@ spider_net_interrupt(int irq, void *ptr)
1656 1656
1657 if (status_reg & SPIDER_NET_RXINT ) { 1657 if (status_reg & SPIDER_NET_RXINT ) {
1658 spider_net_rx_irq_off(card); 1658 spider_net_rx_irq_off(card);
1659 netif_rx_schedule(&card->napi); 1659 napi_schedule(&card->napi);
1660 card->num_rx_ints ++; 1660 card->num_rx_ints ++;
1661 } 1661 }
1662 if (status_reg & SPIDER_NET_TXINT) 1662 if (status_reg & SPIDER_NET_TXINT)
1663 netif_rx_schedule(&card->napi); 1663 napi_schedule(&card->napi);
1664 1664
1665 if (status_reg & SPIDER_NET_LINKINT) 1665 if (status_reg & SPIDER_NET_LINKINT)
1666 spider_net_link_reset(netdev); 1666 spider_net_link_reset(netdev);
diff --git a/drivers/net/starfire.c b/drivers/net/starfire.c
index da3a76b18eff..98fe79515bab 100644
--- a/drivers/net/starfire.c
+++ b/drivers/net/starfire.c
@@ -1342,8 +1342,8 @@ static irqreturn_t intr_handler(int irq, void *dev_instance)
1342 if (intr_status & (IntrRxDone | IntrRxEmpty)) { 1342 if (intr_status & (IntrRxDone | IntrRxEmpty)) {
1343 u32 enable; 1343 u32 enable;
1344 1344
1345 if (likely(netif_rx_schedule_prep(&np->napi))) { 1345 if (likely(napi_schedule_prep(&np->napi))) {
1346 __netif_rx_schedule(&np->napi); 1346 __napi_schedule(&np->napi);
1347 enable = readl(ioaddr + IntrEnable); 1347 enable = readl(ioaddr + IntrEnable);
1348 enable &= ~(IntrRxDone | IntrRxEmpty); 1348 enable &= ~(IntrRxDone | IntrRxEmpty);
1349 writel(enable, ioaddr + IntrEnable); 1349 writel(enable, ioaddr + IntrEnable);
@@ -1587,7 +1587,7 @@ static int netdev_poll(struct napi_struct *napi, int budget)
1587 intr_status = readl(ioaddr + IntrStatus); 1587 intr_status = readl(ioaddr + IntrStatus);
1588 } while (intr_status & (IntrRxDone | IntrRxEmpty)); 1588 } while (intr_status & (IntrRxDone | IntrRxEmpty));
1589 1589
1590 netif_rx_complete(napi); 1590 napi_complete(napi);
1591 intr_status = readl(ioaddr + IntrEnable); 1591 intr_status = readl(ioaddr + IntrEnable);
1592 intr_status |= IntrRxDone | IntrRxEmpty; 1592 intr_status |= IntrRxDone | IntrRxEmpty;
1593 writel(intr_status, ioaddr + IntrEnable); 1593 writel(intr_status, ioaddr + IntrEnable);
diff --git a/drivers/net/sungem.c b/drivers/net/sungem.c
index 86c765d83de1..4942059109f3 100644
--- a/drivers/net/sungem.c
+++ b/drivers/net/sungem.c
@@ -921,7 +921,7 @@ static int gem_poll(struct napi_struct *napi, int budget)
921 gp->status = readl(gp->regs + GREG_STAT); 921 gp->status = readl(gp->regs + GREG_STAT);
922 } while (gp->status & GREG_STAT_NAPI); 922 } while (gp->status & GREG_STAT_NAPI);
923 923
924 __netif_rx_complete(napi); 924 __napi_complete(napi);
925 gem_enable_ints(gp); 925 gem_enable_ints(gp);
926 926
927 spin_unlock_irqrestore(&gp->lock, flags); 927 spin_unlock_irqrestore(&gp->lock, flags);
@@ -944,7 +944,7 @@ static irqreturn_t gem_interrupt(int irq, void *dev_id)
944 944
945 spin_lock_irqsave(&gp->lock, flags); 945 spin_lock_irqsave(&gp->lock, flags);
946 946
947 if (netif_rx_schedule_prep(&gp->napi)) { 947 if (napi_schedule_prep(&gp->napi)) {
948 u32 gem_status = readl(gp->regs + GREG_STAT); 948 u32 gem_status = readl(gp->regs + GREG_STAT);
949 949
950 if (gem_status == 0) { 950 if (gem_status == 0) {
@@ -954,7 +954,7 @@ static irqreturn_t gem_interrupt(int irq, void *dev_id)
954 } 954 }
955 gp->status = gem_status; 955 gp->status = gem_status;
956 gem_disable_ints(gp); 956 gem_disable_ints(gp);
957 __netif_rx_schedule(&gp->napi); 957 __napi_schedule(&gp->napi);
958 } 958 }
959 959
960 spin_unlock_irqrestore(&gp->lock, flags); 960 spin_unlock_irqrestore(&gp->lock, flags);
diff --git a/drivers/net/tc35815.c b/drivers/net/tc35815.c
index bcd0e60cbda9..f42c67e93bf4 100644
--- a/drivers/net/tc35815.c
+++ b/drivers/net/tc35815.c
@@ -1609,8 +1609,8 @@ static irqreturn_t tc35815_interrupt(int irq, void *dev_id)
1609 if (!(dmactl & DMA_IntMask)) { 1609 if (!(dmactl & DMA_IntMask)) {
1610 /* disable interrupts */ 1610 /* disable interrupts */
1611 tc_writel(dmactl | DMA_IntMask, &tr->DMA_Ctl); 1611 tc_writel(dmactl | DMA_IntMask, &tr->DMA_Ctl);
1612 if (netif_rx_schedule_prep(&lp->napi)) 1612 if (napi_schedule_prep(&lp->napi))
1613 __netif_rx_schedule(&lp->napi); 1613 __napi_schedule(&lp->napi);
1614 else { 1614 else {
1615 printk(KERN_ERR "%s: interrupt taken in poll\n", 1615 printk(KERN_ERR "%s: interrupt taken in poll\n",
1616 dev->name); 1616 dev->name);
@@ -1919,7 +1919,7 @@ static int tc35815_poll(struct napi_struct *napi, int budget)
1919 spin_unlock(&lp->lock); 1919 spin_unlock(&lp->lock);
1920 1920
1921 if (received < budget) { 1921 if (received < budget) {
1922 netif_rx_complete(napi); 1922 napi_complete(napi);
1923 /* enable interrupts */ 1923 /* enable interrupts */
1924 tc_writel(tc_readl(&tr->DMA_Ctl) & ~DMA_IntMask, &tr->DMA_Ctl); 1924 tc_writel(tc_readl(&tr->DMA_Ctl) & ~DMA_IntMask, &tr->DMA_Ctl);
1925 } 1925 }
diff --git a/drivers/net/tehuti.c b/drivers/net/tehuti.c
index a7a4dc4d6313..be9f38f8f0bf 100644
--- a/drivers/net/tehuti.c
+++ b/drivers/net/tehuti.c
@@ -265,8 +265,8 @@ static irqreturn_t bdx_isr_napi(int irq, void *dev)
265 bdx_isr_extra(priv, isr); 265 bdx_isr_extra(priv, isr);
266 266
267 if (isr & (IR_RX_DESC_0 | IR_TX_FREE_0)) { 267 if (isr & (IR_RX_DESC_0 | IR_TX_FREE_0)) {
268 if (likely(netif_rx_schedule_prep(&priv->napi))) { 268 if (likely(napi_schedule_prep(&priv->napi))) {
269 __netif_rx_schedule(&priv->napi); 269 __napi_schedule(&priv->napi);
270 RET(IRQ_HANDLED); 270 RET(IRQ_HANDLED);
271 } else { 271 } else {
272 /* NOTE: we get here if intr has slipped into window 272 /* NOTE: we get here if intr has slipped into window
@@ -302,7 +302,7 @@ static int bdx_poll(struct napi_struct *napi, int budget)
302 * device lock and allow waiting tasks (eg rmmod) to advance) */ 302 * device lock and allow waiting tasks (eg rmmod) to advance) */
303 priv->napi_stop = 0; 303 priv->napi_stop = 0;
304 304
305 netif_rx_complete(napi); 305 napi_complete(napi);
306 bdx_enable_interrupts(priv); 306 bdx_enable_interrupts(priv);
307 } 307 }
308 return work_done; 308 return work_done;
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index 8b3f84685387..5b3d60568d55 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -860,7 +860,7 @@ static int tg3_bmcr_reset(struct tg3 *tp)
860 860
861static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg) 861static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
862{ 862{
863 struct tg3 *tp = (struct tg3 *)bp->priv; 863 struct tg3 *tp = bp->priv;
864 u32 val; 864 u32 val;
865 865
866 if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_PAUSED) 866 if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_PAUSED)
@@ -874,7 +874,7 @@ static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
874 874
875static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val) 875static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
876{ 876{
877 struct tg3 *tp = (struct tg3 *)bp->priv; 877 struct tg3 *tp = bp->priv;
878 878
879 if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_PAUSED) 879 if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_PAUSED)
880 return -EAGAIN; 880 return -EAGAIN;
@@ -4460,7 +4460,7 @@ static int tg3_poll(struct napi_struct *napi, int budget)
4460 sblk->status &= ~SD_STATUS_UPDATED; 4460 sblk->status &= ~SD_STATUS_UPDATED;
4461 4461
4462 if (likely(!tg3_has_work(tp))) { 4462 if (likely(!tg3_has_work(tp))) {
4463 netif_rx_complete(napi); 4463 napi_complete(napi);
4464 tg3_restart_ints(tp); 4464 tg3_restart_ints(tp);
4465 break; 4465 break;
4466 } 4466 }
@@ -4470,7 +4470,7 @@ static int tg3_poll(struct napi_struct *napi, int budget)
4470 4470
4471tx_recovery: 4471tx_recovery:
4472 /* work_done is guaranteed to be less than budget. */ 4472 /* work_done is guaranteed to be less than budget. */
4473 netif_rx_complete(napi); 4473 napi_complete(napi);
4474 schedule_work(&tp->reset_task); 4474 schedule_work(&tp->reset_task);
4475 return work_done; 4475 return work_done;
4476} 4476}
@@ -4519,7 +4519,7 @@ static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
4519 prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]); 4519 prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
4520 4520
4521 if (likely(!tg3_irq_sync(tp))) 4521 if (likely(!tg3_irq_sync(tp)))
4522 netif_rx_schedule(&tp->napi); 4522 napi_schedule(&tp->napi);
4523 4523
4524 return IRQ_HANDLED; 4524 return IRQ_HANDLED;
4525} 4525}
@@ -4544,7 +4544,7 @@ static irqreturn_t tg3_msi(int irq, void *dev_id)
4544 */ 4544 */
4545 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001); 4545 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
4546 if (likely(!tg3_irq_sync(tp))) 4546 if (likely(!tg3_irq_sync(tp)))
4547 netif_rx_schedule(&tp->napi); 4547 napi_schedule(&tp->napi);
4548 4548
4549 return IRQ_RETVAL(1); 4549 return IRQ_RETVAL(1);
4550} 4550}
@@ -4586,7 +4586,7 @@ static irqreturn_t tg3_interrupt(int irq, void *dev_id)
4586 sblk->status &= ~SD_STATUS_UPDATED; 4586 sblk->status &= ~SD_STATUS_UPDATED;
4587 if (likely(tg3_has_work(tp))) { 4587 if (likely(tg3_has_work(tp))) {
4588 prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]); 4588 prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
4589 netif_rx_schedule(&tp->napi); 4589 napi_schedule(&tp->napi);
4590 } else { 4590 } else {
4591 /* No work, shared interrupt perhaps? re-enable 4591 /* No work, shared interrupt perhaps? re-enable
4592 * interrupts, and flush that PCI write 4592 * interrupts, and flush that PCI write
@@ -4632,7 +4632,7 @@ static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
4632 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001); 4632 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
4633 if (tg3_irq_sync(tp)) 4633 if (tg3_irq_sync(tp))
4634 goto out; 4634 goto out;
4635 if (netif_rx_schedule_prep(&tp->napi)) { 4635 if (napi_schedule_prep(&tp->napi)) {
4636 prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]); 4636 prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
4637 /* Update last_tag to mark that this status has been 4637 /* Update last_tag to mark that this status has been
4638 * seen. Because interrupt may be shared, we may be 4638 * seen. Because interrupt may be shared, we may be
@@ -4640,7 +4640,7 @@ static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
4640 * if tg3_poll() is not scheduled. 4640 * if tg3_poll() is not scheduled.
4641 */ 4641 */
4642 tp->last_tag = sblk->status_tag; 4642 tp->last_tag = sblk->status_tag;
4643 __netif_rx_schedule(&tp->napi); 4643 __napi_schedule(&tp->napi);
4644 } 4644 }
4645out: 4645out:
4646 return IRQ_RETVAL(handled); 4646 return IRQ_RETVAL(handled);
diff --git a/drivers/net/tokenring/3c359.c b/drivers/net/tokenring/3c359.c
index 43853e3b210e..4a65fc2dd928 100644
--- a/drivers/net/tokenring/3c359.c
+++ b/drivers/net/tokenring/3c359.c
@@ -274,6 +274,15 @@ static void xl_ee_write(struct net_device *dev, int ee_addr, u16 ee_value)
274 274
275 return ; 275 return ;
276} 276}
277
278static const struct net_device_ops xl_netdev_ops = {
279 .ndo_open = xl_open,
280 .ndo_stop = xl_close,
281 .ndo_start_xmit = xl_xmit,
282 .ndo_change_mtu = xl_change_mtu,
283 .ndo_set_multicast_list = xl_set_rx_mode,
284 .ndo_set_mac_address = xl_set_mac_address,
285};
277 286
278static int __devinit xl_probe(struct pci_dev *pdev, 287static int __devinit xl_probe(struct pci_dev *pdev,
279 const struct pci_device_id *ent) 288 const struct pci_device_id *ent)
@@ -337,13 +346,7 @@ static int __devinit xl_probe(struct pci_dev *pdev,
337 return i ; 346 return i ;
338 } 347 }
339 348
340 dev->open=&xl_open; 349 dev->netdev_ops = &xl_netdev_ops;
341 dev->hard_start_xmit=&xl_xmit;
342 dev->change_mtu=&xl_change_mtu;
343 dev->stop=&xl_close;
344 dev->do_ioctl=NULL;
345 dev->set_multicast_list=&xl_set_rx_mode;
346 dev->set_mac_address=&xl_set_mac_address ;
347 SET_NETDEV_DEV(dev, &pdev->dev); 350 SET_NETDEV_DEV(dev, &pdev->dev);
348 351
349 pci_set_drvdata(pdev,dev) ; 352 pci_set_drvdata(pdev,dev) ;
diff --git a/drivers/net/tokenring/abyss.c b/drivers/net/tokenring/abyss.c
index b566d6d79ecd..b9db1b5a58a3 100644
--- a/drivers/net/tokenring/abyss.c
+++ b/drivers/net/tokenring/abyss.c
@@ -92,6 +92,8 @@ static void abyss_sifwritew(struct net_device *dev, unsigned short val, unsigned
92 outw(val, dev->base_addr + reg); 92 outw(val, dev->base_addr + reg);
93} 93}
94 94
95static struct net_device_ops abyss_netdev_ops;
96
95static int __devinit abyss_attach(struct pci_dev *pdev, const struct pci_device_id *ent) 97static int __devinit abyss_attach(struct pci_dev *pdev, const struct pci_device_id *ent)
96{ 98{
97 static int versionprinted; 99 static int versionprinted;
@@ -157,8 +159,7 @@ static int __devinit abyss_attach(struct pci_dev *pdev, const struct pci_device_
157 159
158 memcpy(tp->ProductID, "Madge PCI 16/4 Mk2", PROD_ID_SIZE + 1); 160 memcpy(tp->ProductID, "Madge PCI 16/4 Mk2", PROD_ID_SIZE + 1);
159 161
160 dev->open = abyss_open; 162 dev->netdev_ops = &abyss_netdev_ops;
161 dev->stop = abyss_close;
162 163
163 pci_set_drvdata(pdev, dev); 164 pci_set_drvdata(pdev, dev);
164 SET_NETDEV_DEV(dev, &pdev->dev); 165 SET_NETDEV_DEV(dev, &pdev->dev);
@@ -450,6 +451,11 @@ static struct pci_driver abyss_driver = {
450 451
451static int __init abyss_init (void) 452static int __init abyss_init (void)
452{ 453{
454 abyss_netdev_ops = tms380tr_netdev_ops;
455
456 abyss_netdev_ops.ndo_open = abyss_open;
457 abyss_netdev_ops.ndo_stop = abyss_close;
458
453 return pci_register_driver(&abyss_driver); 459 return pci_register_driver(&abyss_driver);
454} 460}
455 461
diff --git a/drivers/net/tokenring/ibmtr.c b/drivers/net/tokenring/ibmtr.c
index fa7bce6e0c6d..9d896116cf76 100644
--- a/drivers/net/tokenring/ibmtr.c
+++ b/drivers/net/tokenring/ibmtr.c
@@ -200,7 +200,6 @@ static void tr_rx(struct net_device *dev);
200static void ibmtr_reset_timer(struct timer_list*tmr,struct net_device *dev); 200static void ibmtr_reset_timer(struct timer_list*tmr,struct net_device *dev);
201static void tok_rerun(unsigned long dev_addr); 201static void tok_rerun(unsigned long dev_addr);
202static void ibmtr_readlog(struct net_device *dev); 202static void ibmtr_readlog(struct net_device *dev);
203static struct net_device_stats *tok_get_stats(struct net_device *dev);
204static int ibmtr_change_mtu(struct net_device *dev, int mtu); 203static int ibmtr_change_mtu(struct net_device *dev, int mtu);
205static void find_turbo_adapters(int *iolist); 204static void find_turbo_adapters(int *iolist);
206 205
@@ -816,18 +815,21 @@ static unsigned char __devinit get_sram_size(struct tok_info *adapt_info)
816 815
817/*****************************************************************************/ 816/*****************************************************************************/
818 817
818static const struct net_device_ops trdev_netdev_ops = {
819 .ndo_open = tok_open,
820 .ndo_stop = tok_close,
821 .ndo_start_xmit = tok_send_packet,
822 .ndo_set_multicast_list = tok_set_multicast_list,
823 .ndo_change_mtu = ibmtr_change_mtu,
824};
825
819static int __devinit trdev_init(struct net_device *dev) 826static int __devinit trdev_init(struct net_device *dev)
820{ 827{
821 struct tok_info *ti = netdev_priv(dev); 828 struct tok_info *ti = netdev_priv(dev);
822 829
823 SET_PAGE(ti->srb_page); 830 SET_PAGE(ti->srb_page);
824 ti->open_failure = NO ; 831 ti->open_failure = NO ;
825 dev->open = tok_open; 832 dev->netdev_ops = &trdev_netdev_ops;
826 dev->stop = tok_close;
827 dev->hard_start_xmit = tok_send_packet;
828 dev->get_stats = tok_get_stats;
829 dev->set_multicast_list = tok_set_multicast_list;
830 dev->change_mtu = ibmtr_change_mtu;
831 833
832 return 0; 834 return 0;
833} 835}
@@ -1460,7 +1462,7 @@ static irqreturn_t tok_interrupt(int irq, void *dev_id)
1460 "%02X\n", 1462 "%02X\n",
1461 (int)retcode, (int)readb(ti->ssb + 6)); 1463 (int)retcode, (int)readb(ti->ssb + 6));
1462 else 1464 else
1463 ti->tr_stats.tx_packets++; 1465 dev->stats.tx_packets++;
1464 break; 1466 break;
1465 case XMIT_XID_CMD: 1467 case XMIT_XID_CMD:
1466 DPRINTK("xmit xid ret_code: %02X\n", 1468 DPRINTK("xmit xid ret_code: %02X\n",
@@ -1646,7 +1648,7 @@ static void tr_tx(struct net_device *dev)
1646 break; 1648 break;
1647 } 1649 }
1648 writeb(RESP_IN_ASB, ti->mmio + ACA_OFFSET + ACA_SET + ISRA_ODD); 1650 writeb(RESP_IN_ASB, ti->mmio + ACA_OFFSET + ACA_SET + ISRA_ODD);
1649 ti->tr_stats.tx_bytes += ti->current_skb->len; 1651 dev->stats.tx_bytes += ti->current_skb->len;
1650 dev_kfree_skb_irq(ti->current_skb); 1652 dev_kfree_skb_irq(ti->current_skb);
1651 ti->current_skb = NULL; 1653 ti->current_skb = NULL;
1652 netif_wake_queue(dev); 1654 netif_wake_queue(dev);
@@ -1722,7 +1724,7 @@ static void tr_rx(struct net_device *dev)
1722 if (readb(llc + offsetof(struct trllc, llc)) != UI_CMD) { 1724 if (readb(llc + offsetof(struct trllc, llc)) != UI_CMD) {
1723 SET_PAGE(ti->asb_page); 1725 SET_PAGE(ti->asb_page);
1724 writeb(DATA_LOST, ti->asb + RETCODE_OFST); 1726 writeb(DATA_LOST, ti->asb + RETCODE_OFST);
1725 ti->tr_stats.rx_dropped++; 1727 dev->stats.rx_dropped++;
1726 writeb(RESP_IN_ASB, ti->mmio + ACA_OFFSET + ACA_SET + ISRA_ODD); 1728 writeb(RESP_IN_ASB, ti->mmio + ACA_OFFSET + ACA_SET + ISRA_ODD);
1727 return; 1729 return;
1728 } 1730 }
@@ -1757,7 +1759,7 @@ static void tr_rx(struct net_device *dev)
1757 1759
1758 if (!(skb = dev_alloc_skb(skb_size))) { 1760 if (!(skb = dev_alloc_skb(skb_size))) {
1759 DPRINTK("out of memory. frame dropped.\n"); 1761 DPRINTK("out of memory. frame dropped.\n");
1760 ti->tr_stats.rx_dropped++; 1762 dev->stats.rx_dropped++;
1761 SET_PAGE(ti->asb_page); 1763 SET_PAGE(ti->asb_page);
1762 writeb(DATA_LOST, ti->asb + offsetof(struct asb_rec, ret_code)); 1764 writeb(DATA_LOST, ti->asb + offsetof(struct asb_rec, ret_code));
1763 writeb(RESP_IN_ASB, ti->mmio + ACA_OFFSET + ACA_SET + ISRA_ODD); 1765 writeb(RESP_IN_ASB, ti->mmio + ACA_OFFSET + ACA_SET + ISRA_ODD);
@@ -1813,8 +1815,8 @@ static void tr_rx(struct net_device *dev)
1813 1815
1814 writeb(RESP_IN_ASB, ti->mmio + ACA_OFFSET + ACA_SET + ISRA_ODD); 1816 writeb(RESP_IN_ASB, ti->mmio + ACA_OFFSET + ACA_SET + ISRA_ODD);
1815 1817
1816 ti->tr_stats.rx_bytes += skb->len; 1818 dev->stats.rx_bytes += skb->len;
1817 ti->tr_stats.rx_packets++; 1819 dev->stats.rx_packets++;
1818 1820
1819 skb->protocol = tr_type_trans(skb, dev); 1821 skb->protocol = tr_type_trans(skb, dev);
1820 if (IPv4_p) { 1822 if (IPv4_p) {
@@ -1876,21 +1878,6 @@ static void ibmtr_readlog(struct net_device *dev)
1876 1878
1877/*****************************************************************************/ 1879/*****************************************************************************/
1878 1880
1879/* tok_get_stats(): Basically a scaffold routine which will return
1880 the address of the tr_statistics structure associated with
1881 this device -- the tr.... structure is an ethnet look-alike
1882 so at least for this iteration may suffice. */
1883
1884static struct net_device_stats *tok_get_stats(struct net_device *dev)
1885{
1886
1887 struct tok_info *toki;
1888 toki = netdev_priv(dev);
1889 return (struct net_device_stats *) &toki->tr_stats;
1890}
1891
1892/*****************************************************************************/
1893
1894static int ibmtr_change_mtu(struct net_device *dev, int mtu) 1881static int ibmtr_change_mtu(struct net_device *dev, int mtu)
1895{ 1882{
1896 struct tok_info *ti = netdev_priv(dev); 1883 struct tok_info *ti = netdev_priv(dev);
diff --git a/drivers/net/tokenring/lanstreamer.c b/drivers/net/tokenring/lanstreamer.c
index 239c75217b12..0b2b7925da22 100644
--- a/drivers/net/tokenring/lanstreamer.c
+++ b/drivers/net/tokenring/lanstreamer.c
@@ -207,7 +207,6 @@ static int streamer_xmit(struct sk_buff *skb, struct net_device *dev);
207static int streamer_close(struct net_device *dev); 207static int streamer_close(struct net_device *dev);
208static void streamer_set_rx_mode(struct net_device *dev); 208static void streamer_set_rx_mode(struct net_device *dev);
209static irqreturn_t streamer_interrupt(int irq, void *dev_id); 209static irqreturn_t streamer_interrupt(int irq, void *dev_id);
210static struct net_device_stats *streamer_get_stats(struct net_device *dev);
211static int streamer_set_mac_address(struct net_device *dev, void *addr); 210static int streamer_set_mac_address(struct net_device *dev, void *addr);
212static void streamer_arb_cmd(struct net_device *dev); 211static void streamer_arb_cmd(struct net_device *dev);
213static int streamer_change_mtu(struct net_device *dev, int mtu); 212static int streamer_change_mtu(struct net_device *dev, int mtu);
@@ -222,6 +221,18 @@ struct streamer_private *dev_streamer=NULL;
222#endif 221#endif
223#endif 222#endif
224 223
224static const struct net_device_ops streamer_netdev_ops = {
225 .ndo_open = streamer_open,
226 .ndo_stop = streamer_close,
227 .ndo_start_xmit = streamer_xmit,
228 .ndo_change_mtu = streamer_change_mtu,
229#if STREAMER_IOCTL
230 .ndo_do_ioctl = streamer_ioctl,
231#endif
232 .ndo_set_multicast_list = streamer_set_rx_mode,
233 .ndo_set_mac_address = streamer_set_mac_address,
234};
235
225static int __devinit streamer_init_one(struct pci_dev *pdev, 236static int __devinit streamer_init_one(struct pci_dev *pdev,
226 const struct pci_device_id *ent) 237 const struct pci_device_id *ent)
227{ 238{
@@ -321,18 +332,7 @@ static int __devinit streamer_init_one(struct pci_dev *pdev,
321 init_waitqueue_head(&streamer_priv->srb_wait); 332 init_waitqueue_head(&streamer_priv->srb_wait);
322 init_waitqueue_head(&streamer_priv->trb_wait); 333 init_waitqueue_head(&streamer_priv->trb_wait);
323 334
324 dev->open = &streamer_open; 335 dev->netdev_ops = &streamer_netdev_ops;
325 dev->hard_start_xmit = &streamer_xmit;
326 dev->change_mtu = &streamer_change_mtu;
327 dev->stop = &streamer_close;
328#if STREAMER_IOCTL
329 dev->do_ioctl = &streamer_ioctl;
330#else
331 dev->do_ioctl = NULL;
332#endif
333 dev->set_multicast_list = &streamer_set_rx_mode;
334 dev->get_stats = &streamer_get_stats;
335 dev->set_mac_address = &streamer_set_mac_address;
336 dev->irq = pdev->irq; 336 dev->irq = pdev->irq;
337 dev->base_addr=pio_start; 337 dev->base_addr=pio_start;
338 SET_NETDEV_DEV(dev, &pdev->dev); 338 SET_NETDEV_DEV(dev, &pdev->dev);
@@ -937,7 +937,7 @@ static void streamer_rx(struct net_device *dev)
937 if (skb == NULL) 937 if (skb == NULL)
938 { 938 {
939 printk(KERN_WARNING "%s: Not enough memory to copy packet to upper layers. \n", dev->name); 939 printk(KERN_WARNING "%s: Not enough memory to copy packet to upper layers. \n", dev->name);
940 streamer_priv->streamer_stats.rx_dropped++; 940 dev->stats.rx_dropped++;
941 } else { /* we allocated an skb OK */ 941 } else { /* we allocated an skb OK */
942 if (buffer_cnt == 1) { 942 if (buffer_cnt == 1) {
943 /* release the DMA mapping */ 943 /* release the DMA mapping */
@@ -1009,8 +1009,8 @@ static void streamer_rx(struct net_device *dev)
1009 /* send up to the protocol */ 1009 /* send up to the protocol */
1010 netif_rx(skb); 1010 netif_rx(skb);
1011 } 1011 }
1012 streamer_priv->streamer_stats.rx_packets++; 1012 dev->stats.rx_packets++;
1013 streamer_priv->streamer_stats.rx_bytes += length; 1013 dev->stats.rx_bytes += length;
1014 } /* if skb == null */ 1014 } /* if skb == null */
1015 } /* end received without errors */ 1015 } /* end received without errors */
1016 1016
@@ -1053,8 +1053,8 @@ static irqreturn_t streamer_interrupt(int irq, void *dev_id)
1053 while(streamer_priv->streamer_tx_ring[(streamer_priv->tx_ring_last_status + 1) & (STREAMER_TX_RING_SIZE - 1)].status) { 1053 while(streamer_priv->streamer_tx_ring[(streamer_priv->tx_ring_last_status + 1) & (STREAMER_TX_RING_SIZE - 1)].status) {
1054 streamer_priv->tx_ring_last_status = (streamer_priv->tx_ring_last_status + 1) & (STREAMER_TX_RING_SIZE - 1); 1054 streamer_priv->tx_ring_last_status = (streamer_priv->tx_ring_last_status + 1) & (STREAMER_TX_RING_SIZE - 1);
1055 streamer_priv->free_tx_ring_entries++; 1055 streamer_priv->free_tx_ring_entries++;
1056 streamer_priv->streamer_stats.tx_bytes += streamer_priv->tx_ring_skb[streamer_priv->tx_ring_last_status]->len; 1056 dev->stats.tx_bytes += streamer_priv->tx_ring_skb[streamer_priv->tx_ring_last_status]->len;
1057 streamer_priv->streamer_stats.tx_packets++; 1057 dev->stats.tx_packets++;
1058 dev_kfree_skb_irq(streamer_priv->tx_ring_skb[streamer_priv->tx_ring_last_status]); 1058 dev_kfree_skb_irq(streamer_priv->tx_ring_skb[streamer_priv->tx_ring_last_status]);
1059 streamer_priv->streamer_tx_ring[streamer_priv->tx_ring_last_status].buffer = 0xdeadbeef; 1059 streamer_priv->streamer_tx_ring[streamer_priv->tx_ring_last_status].buffer = 0xdeadbeef;
1060 streamer_priv->streamer_tx_ring[streamer_priv->tx_ring_last_status].status = 0; 1060 streamer_priv->streamer_tx_ring[streamer_priv->tx_ring_last_status].status = 0;
@@ -1484,13 +1484,6 @@ static void streamer_srb_bh(struct net_device *dev)
1484 } /* switch srb[0] */ 1484 } /* switch srb[0] */
1485} 1485}
1486 1486
1487static struct net_device_stats *streamer_get_stats(struct net_device *dev)
1488{
1489 struct streamer_private *streamer_priv;
1490 streamer_priv = netdev_priv(dev);
1491 return (struct net_device_stats *) &streamer_priv->streamer_stats;
1492}
1493
1494static int streamer_set_mac_address(struct net_device *dev, void *addr) 1487static int streamer_set_mac_address(struct net_device *dev, void *addr)
1495{ 1488{
1496 struct sockaddr *saddr = addr; 1489 struct sockaddr *saddr = addr;
diff --git a/drivers/net/tokenring/lanstreamer.h b/drivers/net/tokenring/lanstreamer.h
index 13ccee6449c1..3c58d6a3fbc9 100644
--- a/drivers/net/tokenring/lanstreamer.h
+++ b/drivers/net/tokenring/lanstreamer.h
@@ -299,7 +299,6 @@ struct streamer_private {
299 int tx_ring_free, tx_ring_last_status, rx_ring_last_received, 299 int tx_ring_free, tx_ring_last_status, rx_ring_last_received,
300 free_tx_ring_entries; 300 free_tx_ring_entries;
301 301
302 struct net_device_stats streamer_stats;
303 __u16 streamer_lan_status; 302 __u16 streamer_lan_status;
304 __u8 streamer_ring_speed; 303 __u8 streamer_ring_speed;
305 __u16 pkt_buf_sz; 304 __u16 pkt_buf_sz;
diff --git a/drivers/net/tokenring/olympic.c b/drivers/net/tokenring/olympic.c
index ecb5c7c96910..77dc9da4c0b9 100644
--- a/drivers/net/tokenring/olympic.c
+++ b/drivers/net/tokenring/olympic.c
@@ -187,7 +187,6 @@ static int olympic_close(struct net_device *dev);
187static void olympic_set_rx_mode(struct net_device *dev); 187static void olympic_set_rx_mode(struct net_device *dev);
188static void olympic_freemem(struct net_device *dev) ; 188static void olympic_freemem(struct net_device *dev) ;
189static irqreturn_t olympic_interrupt(int irq, void *dev_id); 189static irqreturn_t olympic_interrupt(int irq, void *dev_id);
190static struct net_device_stats * olympic_get_stats(struct net_device *dev);
191static int olympic_set_mac_address(struct net_device *dev, void *addr) ; 190static int olympic_set_mac_address(struct net_device *dev, void *addr) ;
192static void olympic_arb_cmd(struct net_device *dev); 191static void olympic_arb_cmd(struct net_device *dev);
193static int olympic_change_mtu(struct net_device *dev, int mtu); 192static int olympic_change_mtu(struct net_device *dev, int mtu);
@@ -195,6 +194,15 @@ static void olympic_srb_bh(struct net_device *dev) ;
195static void olympic_asb_bh(struct net_device *dev) ; 194static void olympic_asb_bh(struct net_device *dev) ;
196static int olympic_proc_info(char *buffer, char **start, off_t offset, int length, int *eof, void *data) ; 195static int olympic_proc_info(char *buffer, char **start, off_t offset, int length, int *eof, void *data) ;
197 196
197static const struct net_device_ops olympic_netdev_ops = {
198 .ndo_open = olympic_open,
199 .ndo_stop = olympic_close,
200 .ndo_start_xmit = olympic_xmit,
201 .ndo_change_mtu = olympic_change_mtu,
202 .ndo_set_multicast_list = olympic_set_rx_mode,
203 .ndo_set_mac_address = olympic_set_mac_address,
204};
205
198static int __devinit olympic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) 206static int __devinit olympic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
199{ 207{
200 struct net_device *dev ; 208 struct net_device *dev ;
@@ -253,14 +261,7 @@ static int __devinit olympic_probe(struct pci_dev *pdev, const struct pci_device
253 goto op_free_iomap; 261 goto op_free_iomap;
254 } 262 }
255 263
256 dev->open=&olympic_open; 264 dev->netdev_ops = &olympic_netdev_ops;
257 dev->hard_start_xmit=&olympic_xmit;
258 dev->change_mtu=&olympic_change_mtu;
259 dev->stop=&olympic_close;
260 dev->do_ioctl=NULL;
261 dev->set_multicast_list=&olympic_set_rx_mode;
262 dev->get_stats=&olympic_get_stats ;
263 dev->set_mac_address=&olympic_set_mac_address ;
264 SET_NETDEV_DEV(dev, &pdev->dev); 265 SET_NETDEV_DEV(dev, &pdev->dev);
265 266
266 pci_set_drvdata(pdev,dev) ; 267 pci_set_drvdata(pdev,dev) ;
@@ -785,7 +786,7 @@ static void olympic_rx(struct net_device *dev)
785 } 786 }
786 olympic_priv->rx_ring_last_received += i ; 787 olympic_priv->rx_ring_last_received += i ;
787 olympic_priv->rx_ring_last_received &= (OLYMPIC_RX_RING_SIZE -1) ; 788 olympic_priv->rx_ring_last_received &= (OLYMPIC_RX_RING_SIZE -1) ;
788 olympic_priv->olympic_stats.rx_errors++; 789 dev->stats.rx_errors++;
789 } else { 790 } else {
790 791
791 if (buffer_cnt == 1) { 792 if (buffer_cnt == 1) {
@@ -796,7 +797,7 @@ static void olympic_rx(struct net_device *dev)
796 797
797 if (skb == NULL) { 798 if (skb == NULL) {
798 printk(KERN_WARNING "%s: Not enough memory to copy packet to upper layers. \n",dev->name) ; 799 printk(KERN_WARNING "%s: Not enough memory to copy packet to upper layers. \n",dev->name) ;
799 olympic_priv->olympic_stats.rx_dropped++ ; 800 dev->stats.rx_dropped++;
800 /* Update counters even though we don't transfer the frame */ 801 /* Update counters even though we don't transfer the frame */
801 olympic_priv->rx_ring_last_received += i ; 802 olympic_priv->rx_ring_last_received += i ;
802 olympic_priv->rx_ring_last_received &= (OLYMPIC_RX_RING_SIZE -1) ; 803 olympic_priv->rx_ring_last_received &= (OLYMPIC_RX_RING_SIZE -1) ;
@@ -862,8 +863,8 @@ static void olympic_rx(struct net_device *dev)
862 skb->protocol = tr_type_trans(skb,dev); 863 skb->protocol = tr_type_trans(skb,dev);
863 netif_rx(skb) ; 864 netif_rx(skb) ;
864 } 865 }
865 olympic_priv->olympic_stats.rx_packets++ ; 866 dev->stats.rx_packets++ ;
866 olympic_priv->olympic_stats.rx_bytes += length ; 867 dev->stats.rx_bytes += length ;
867 } /* if skb == null */ 868 } /* if skb == null */
868 } /* If status & 0x3b */ 869 } /* If status & 0x3b */
869 870
@@ -971,8 +972,8 @@ static irqreturn_t olympic_interrupt(int irq, void *dev_id)
971 olympic_priv->tx_ring_last_status++; 972 olympic_priv->tx_ring_last_status++;
972 olympic_priv->tx_ring_last_status &= (OLYMPIC_TX_RING_SIZE-1); 973 olympic_priv->tx_ring_last_status &= (OLYMPIC_TX_RING_SIZE-1);
973 olympic_priv->free_tx_ring_entries++; 974 olympic_priv->free_tx_ring_entries++;
974 olympic_priv->olympic_stats.tx_bytes += olympic_priv->tx_ring_skb[olympic_priv->tx_ring_last_status]->len; 975 dev->stats.tx_bytes += olympic_priv->tx_ring_skb[olympic_priv->tx_ring_last_status]->len;
975 olympic_priv->olympic_stats.tx_packets++ ; 976 dev->stats.tx_packets++ ;
976 pci_unmap_single(olympic_priv->pdev, 977 pci_unmap_single(olympic_priv->pdev,
977 le32_to_cpu(olympic_priv->olympic_tx_ring[olympic_priv->tx_ring_last_status].buffer), 978 le32_to_cpu(olympic_priv->olympic_tx_ring[olympic_priv->tx_ring_last_status].buffer),
978 olympic_priv->tx_ring_skb[olympic_priv->tx_ring_last_status]->len,PCI_DMA_TODEVICE); 979 olympic_priv->tx_ring_skb[olympic_priv->tx_ring_last_status]->len,PCI_DMA_TODEVICE);
@@ -1344,13 +1345,6 @@ static void olympic_srb_bh(struct net_device *dev)
1344 1345
1345} 1346}
1346 1347
1347static struct net_device_stats * olympic_get_stats(struct net_device *dev)
1348{
1349 struct olympic_private *olympic_priv ;
1350 olympic_priv=netdev_priv(dev);
1351 return (struct net_device_stats *) &olympic_priv->olympic_stats;
1352}
1353
1354static int olympic_set_mac_address (struct net_device *dev, void *addr) 1348static int olympic_set_mac_address (struct net_device *dev, void *addr)
1355{ 1349{
1356 struct sockaddr *saddr = addr ; 1350 struct sockaddr *saddr = addr ;
diff --git a/drivers/net/tokenring/olympic.h b/drivers/net/tokenring/olympic.h
index 10fbba08978f..30631bae4c94 100644
--- a/drivers/net/tokenring/olympic.h
+++ b/drivers/net/tokenring/olympic.h
@@ -275,7 +275,6 @@ struct olympic_private {
275 struct sk_buff *tx_ring_skb[OLYMPIC_TX_RING_SIZE], *rx_ring_skb[OLYMPIC_RX_RING_SIZE]; 275 struct sk_buff *tx_ring_skb[OLYMPIC_TX_RING_SIZE], *rx_ring_skb[OLYMPIC_RX_RING_SIZE];
276 int tx_ring_free, tx_ring_last_status, rx_ring_last_received,rx_status_last_received, free_tx_ring_entries; 276 int tx_ring_free, tx_ring_last_status, rx_ring_last_received,rx_status_last_received, free_tx_ring_entries;
277 277
278 struct net_device_stats olympic_stats ;
279 u16 olympic_lan_status ; 278 u16 olympic_lan_status ;
280 u8 olympic_ring_speed ; 279 u8 olympic_ring_speed ;
281 u16 pkt_buf_sz ; 280 u16 pkt_buf_sz ;
diff --git a/drivers/net/tokenring/tms380tr.c b/drivers/net/tokenring/tms380tr.c
index 5be34c2fd483..b11bb72dc7ab 100644
--- a/drivers/net/tokenring/tms380tr.c
+++ b/drivers/net/tokenring/tms380tr.c
@@ -2330,6 +2330,17 @@ void tmsdev_term(struct net_device *dev)
2330 DMA_BIDIRECTIONAL); 2330 DMA_BIDIRECTIONAL);
2331} 2331}
2332 2332
2333const struct net_device_ops tms380tr_netdev_ops = {
2334 .ndo_open = tms380tr_open,
2335 .ndo_stop = tms380tr_close,
2336 .ndo_start_xmit = tms380tr_send_packet,
2337 .ndo_tx_timeout = tms380tr_timeout,
2338 .ndo_get_stats = tms380tr_get_stats,
2339 .ndo_set_multicast_list = tms380tr_set_multicast_list,
2340 .ndo_set_mac_address = tms380tr_set_mac_address,
2341};
2342EXPORT_SYMBOL(tms380tr_netdev_ops);
2343
2333int tmsdev_init(struct net_device *dev, struct device *pdev) 2344int tmsdev_init(struct net_device *dev, struct device *pdev)
2334{ 2345{
2335 struct net_local *tms_local; 2346 struct net_local *tms_local;
@@ -2353,16 +2364,8 @@ int tmsdev_init(struct net_device *dev, struct device *pdev)
2353 return -ENOMEM; 2364 return -ENOMEM;
2354 } 2365 }
2355 2366
2356 /* These can be overridden by the card driver if needed */ 2367 dev->netdev_ops = &tms380tr_netdev_ops;
2357 dev->open = tms380tr_open;
2358 dev->stop = tms380tr_close;
2359 dev->do_ioctl = NULL;
2360 dev->hard_start_xmit = tms380tr_send_packet;
2361 dev->tx_timeout = tms380tr_timeout;
2362 dev->watchdog_timeo = HZ; 2368 dev->watchdog_timeo = HZ;
2363 dev->get_stats = tms380tr_get_stats;
2364 dev->set_multicast_list = &tms380tr_set_multicast_list;
2365 dev->set_mac_address = tms380tr_set_mac_address;
2366 2369
2367 return 0; 2370 return 0;
2368} 2371}
diff --git a/drivers/net/tokenring/tms380tr.h b/drivers/net/tokenring/tms380tr.h
index 7af76d708849..60b30ee38dcb 100644
--- a/drivers/net/tokenring/tms380tr.h
+++ b/drivers/net/tokenring/tms380tr.h
@@ -14,6 +14,7 @@
14#include <linux/interrupt.h> 14#include <linux/interrupt.h>
15 15
16/* module prototypes */ 16/* module prototypes */
17extern const struct net_device_ops tms380tr_netdev_ops;
17int tms380tr_open(struct net_device *dev); 18int tms380tr_open(struct net_device *dev);
18int tms380tr_close(struct net_device *dev); 19int tms380tr_close(struct net_device *dev);
19irqreturn_t tms380tr_interrupt(int irq, void *dev_id); 20irqreturn_t tms380tr_interrupt(int irq, void *dev_id);
diff --git a/drivers/net/tokenring/tmspci.c b/drivers/net/tokenring/tmspci.c
index 5f601773c260..b397e8785d6d 100644
--- a/drivers/net/tokenring/tmspci.c
+++ b/drivers/net/tokenring/tmspci.c
@@ -157,8 +157,8 @@ static int __devinit tms_pci_attach(struct pci_dev *pdev, const struct pci_devic
157 157
158 tp->tmspriv = cardinfo; 158 tp->tmspriv = cardinfo;
159 159
160 dev->open = tms380tr_open; 160 dev->netdev_ops = &tms380tr_netdev_ops;
161 dev->stop = tms380tr_close; 161
162 pci_set_drvdata(pdev, dev); 162 pci_set_drvdata(pdev, dev);
163 SET_NETDEV_DEV(dev, &pdev->dev); 163 SET_NETDEV_DEV(dev, &pdev->dev);
164 164
diff --git a/drivers/net/tsi108_eth.c b/drivers/net/tsi108_eth.c
index 75461dbd4876..1138782e5611 100644
--- a/drivers/net/tsi108_eth.c
+++ b/drivers/net/tsi108_eth.c
@@ -888,7 +888,7 @@ static int tsi108_poll(struct napi_struct *napi, int budget)
888 888
889 if (num_received < budget) { 889 if (num_received < budget) {
890 data->rxpending = 0; 890 data->rxpending = 0;
891 netif_rx_complete(napi); 891 napi_complete(napi);
892 892
893 TSI_WRITE(TSI108_EC_INTMASK, 893 TSI_WRITE(TSI108_EC_INTMASK,
894 TSI_READ(TSI108_EC_INTMASK) 894 TSI_READ(TSI108_EC_INTMASK)
@@ -915,11 +915,11 @@ static void tsi108_rx_int(struct net_device *dev)
915 * 915 *
916 * This can happen if this code races with tsi108_poll(), which masks 916 * This can happen if this code races with tsi108_poll(), which masks
917 * the interrupts after tsi108_irq_one() read the mask, but before 917 * the interrupts after tsi108_irq_one() read the mask, but before
918 * netif_rx_schedule is called. It could also happen due to calls 918 * napi_schedule is called. It could also happen due to calls
919 * from tsi108_check_rxring(). 919 * from tsi108_check_rxring().
920 */ 920 */
921 921
922 if (netif_rx_schedule_prep(&data->napi)) { 922 if (napi_schedule_prep(&data->napi)) {
923 /* Mask, rather than ack, the receive interrupts. The ack 923 /* Mask, rather than ack, the receive interrupts. The ack
924 * will happen in tsi108_poll(). 924 * will happen in tsi108_poll().
925 */ 925 */
@@ -930,7 +930,7 @@ static void tsi108_rx_int(struct net_device *dev)
930 | TSI108_INT_RXTHRESH | 930 | TSI108_INT_RXTHRESH |
931 TSI108_INT_RXOVERRUN | TSI108_INT_RXERROR | 931 TSI108_INT_RXOVERRUN | TSI108_INT_RXERROR |
932 TSI108_INT_RXWAIT); 932 TSI108_INT_RXWAIT);
933 __netif_rx_schedule(&data->napi); 933 __napi_schedule(&data->napi);
934 } else { 934 } else {
935 if (!netif_running(dev)) { 935 if (!netif_running(dev)) {
936 /* This can happen if an interrupt occurs while the 936 /* This can happen if an interrupt occurs while the
diff --git a/drivers/net/tulip/interrupt.c b/drivers/net/tulip/interrupt.c
index 6c3428a37c0b..9f946d421088 100644
--- a/drivers/net/tulip/interrupt.c
+++ b/drivers/net/tulip/interrupt.c
@@ -103,7 +103,7 @@ void oom_timer(unsigned long data)
103{ 103{
104 struct net_device *dev = (struct net_device *)data; 104 struct net_device *dev = (struct net_device *)data;
105 struct tulip_private *tp = netdev_priv(dev); 105 struct tulip_private *tp = netdev_priv(dev);
106 netif_rx_schedule(&tp->napi); 106 napi_schedule(&tp->napi);
107} 107}
108 108
109int tulip_poll(struct napi_struct *napi, int budget) 109int tulip_poll(struct napi_struct *napi, int budget)
@@ -300,7 +300,7 @@ int tulip_poll(struct napi_struct *napi, int budget)
300 300
301 /* Remove us from polling list and enable RX intr. */ 301 /* Remove us from polling list and enable RX intr. */
302 302
303 netif_rx_complete(napi); 303 napi_complete(napi);
304 iowrite32(tulip_tbl[tp->chip_id].valid_intrs, tp->base_addr+CSR7); 304 iowrite32(tulip_tbl[tp->chip_id].valid_intrs, tp->base_addr+CSR7);
305 305
306 /* The last op happens after poll completion. Which means the following: 306 /* The last op happens after poll completion. Which means the following:
@@ -333,10 +333,10 @@ int tulip_poll(struct napi_struct *napi, int budget)
333 333
334 /* Think: timer_pending() was an explicit signature of bug. 334 /* Think: timer_pending() was an explicit signature of bug.
335 * Timer can be pending now but fired and completed 335 * Timer can be pending now but fired and completed
336 * before we did netif_rx_complete(). See? We would lose it. */ 336 * before we did napi_complete(). See? We would lose it. */
337 337
338 /* remove ourselves from the polling list */ 338 /* remove ourselves from the polling list */
339 netif_rx_complete(napi); 339 napi_complete(napi);
340 340
341 return work_done; 341 return work_done;
342} 342}
@@ -519,7 +519,7 @@ irqreturn_t tulip_interrupt(int irq, void *dev_instance)
519 rxd++; 519 rxd++;
520 /* Mask RX intrs and add the device to poll list. */ 520 /* Mask RX intrs and add the device to poll list. */
521 iowrite32(tulip_tbl[tp->chip_id].valid_intrs&~RxPollInt, ioaddr + CSR7); 521 iowrite32(tulip_tbl[tp->chip_id].valid_intrs&~RxPollInt, ioaddr + CSR7);
522 netif_rx_schedule(&tp->napi); 522 napi_schedule(&tp->napi);
523 523
524 if (!(csr5&~(AbnormalIntr|NormalIntr|RxPollInt|TPLnkPass))) 524 if (!(csr5&~(AbnormalIntr|NormalIntr|RxPollInt|TPLnkPass)))
525 break; 525 break;
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index d7b81e4fdd56..e9bcbdfe015a 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -63,6 +63,7 @@
63#include <linux/virtio_net.h> 63#include <linux/virtio_net.h>
64#include <net/net_namespace.h> 64#include <net/net_namespace.h>
65#include <net/netns/generic.h> 65#include <net/netns/generic.h>
66#include <net/rtnetlink.h>
66 67
67#include <asm/system.h> 68#include <asm/system.h>
68#include <asm/uaccess.h> 69#include <asm/uaccess.h>
@@ -87,14 +88,19 @@ struct tap_filter {
87 unsigned char addr[FLT_EXACT_COUNT][ETH_ALEN]; 88 unsigned char addr[FLT_EXACT_COUNT][ETH_ALEN];
88}; 89};
89 90
91struct tun_file {
92 atomic_t count;
93 struct tun_struct *tun;
94 struct net *net;
95 wait_queue_head_t read_wait;
96};
97
90struct tun_struct { 98struct tun_struct {
91 struct list_head list; 99 struct tun_file *tfile;
92 unsigned int flags; 100 unsigned int flags;
93 int attached;
94 uid_t owner; 101 uid_t owner;
95 gid_t group; 102 gid_t group;
96 103
97 wait_queue_head_t read_wait;
98 struct sk_buff_head readq; 104 struct sk_buff_head readq;
99 105
100 struct net_device *dev; 106 struct net_device *dev;
@@ -107,6 +113,88 @@ struct tun_struct {
107#endif 113#endif
108}; 114};
109 115
116static int tun_attach(struct tun_struct *tun, struct file *file)
117{
118 struct tun_file *tfile = file->private_data;
119 const struct cred *cred = current_cred();
120 int err;
121
122 ASSERT_RTNL();
123
124 /* Check permissions */
125 if (((tun->owner != -1 && cred->euid != tun->owner) ||
126 (tun->group != -1 && cred->egid != tun->group)) &&
127 !capable(CAP_NET_ADMIN))
128 return -EPERM;
129
130 netif_tx_lock_bh(tun->dev);
131
132 err = -EINVAL;
133 if (tfile->tun)
134 goto out;
135
136 err = -EBUSY;
137 if (tun->tfile)
138 goto out;
139
140 err = 0;
141 tfile->tun = tun;
142 tun->tfile = tfile;
143 dev_hold(tun->dev);
144 atomic_inc(&tfile->count);
145
146out:
147 netif_tx_unlock_bh(tun->dev);
148 return err;
149}
150
151static void __tun_detach(struct tun_struct *tun)
152{
153 struct tun_file *tfile = tun->tfile;
154
155 /* Detach from net device */
156 netif_tx_lock_bh(tun->dev);
157 tfile->tun = NULL;
158 tun->tfile = NULL;
159 netif_tx_unlock_bh(tun->dev);
160
161 /* Drop read queue */
162 skb_queue_purge(&tun->readq);
163
164 /* Drop the extra count on the net device */
165 dev_put(tun->dev);
166}
167
168static void tun_detach(struct tun_struct *tun)
169{
170 rtnl_lock();
171 __tun_detach(tun);
172 rtnl_unlock();
173}
174
175static struct tun_struct *__tun_get(struct tun_file *tfile)
176{
177 struct tun_struct *tun = NULL;
178
179 if (atomic_inc_not_zero(&tfile->count))
180 tun = tfile->tun;
181
182 return tun;
183}
184
185static struct tun_struct *tun_get(struct file *file)
186{
187 return __tun_get(file->private_data);
188}
189
190static void tun_put(struct tun_struct *tun)
191{
192 struct tun_file *tfile = tun->tfile;
193
194 if (atomic_dec_and_test(&tfile->count))
195 tun_detach(tfile->tun);
196}
197
110/* TAP filterting */ 198/* TAP filterting */
111static void addr_hash_set(u32 *mask, const u8 *addr) 199static void addr_hash_set(u32 *mask, const u8 *addr)
112{ 200{
@@ -213,13 +301,23 @@ static int check_filter(struct tap_filter *filter, const struct sk_buff *skb)
213 301
214/* Network device part of the driver */ 302/* Network device part of the driver */
215 303
216static int tun_net_id;
217struct tun_net {
218 struct list_head dev_list;
219};
220
221static const struct ethtool_ops tun_ethtool_ops; 304static const struct ethtool_ops tun_ethtool_ops;
222 305
306/* Net device detach from fd. */
307static void tun_net_uninit(struct net_device *dev)
308{
309 struct tun_struct *tun = netdev_priv(dev);
310 struct tun_file *tfile = tun->tfile;
311
312 /* Inform the methods they need to stop using the dev.
313 */
314 if (tfile) {
315 wake_up_all(&tfile->read_wait);
316 if (atomic_dec_and_test(&tfile->count))
317 __tun_detach(tun);
318 }
319}
320
223/* Net device open. */ 321/* Net device open. */
224static int tun_net_open(struct net_device *dev) 322static int tun_net_open(struct net_device *dev)
225{ 323{
@@ -242,7 +340,7 @@ static int tun_net_xmit(struct sk_buff *skb, struct net_device *dev)
242 DBG(KERN_INFO "%s: tun_net_xmit %d\n", tun->dev->name, skb->len); 340 DBG(KERN_INFO "%s: tun_net_xmit %d\n", tun->dev->name, skb->len);
243 341
244 /* Drop packet if interface is not attached */ 342 /* Drop packet if interface is not attached */
245 if (!tun->attached) 343 if (!tun->tfile)
246 goto drop; 344 goto drop;
247 345
248 /* Drop if the filter does not like it. 346 /* Drop if the filter does not like it.
@@ -274,7 +372,7 @@ static int tun_net_xmit(struct sk_buff *skb, struct net_device *dev)
274 /* Notify and wake up reader process */ 372 /* Notify and wake up reader process */
275 if (tun->flags & TUN_FASYNC) 373 if (tun->flags & TUN_FASYNC)
276 kill_fasync(&tun->fasync, SIGIO, POLL_IN); 374 kill_fasync(&tun->fasync, SIGIO, POLL_IN);
277 wake_up_interruptible(&tun->read_wait); 375 wake_up_interruptible(&tun->tfile->read_wait);
278 return 0; 376 return 0;
279 377
280drop: 378drop:
@@ -306,6 +404,7 @@ tun_net_change_mtu(struct net_device *dev, int new_mtu)
306} 404}
307 405
308static const struct net_device_ops tun_netdev_ops = { 406static const struct net_device_ops tun_netdev_ops = {
407 .ndo_uninit = tun_net_uninit,
309 .ndo_open = tun_net_open, 408 .ndo_open = tun_net_open,
310 .ndo_stop = tun_net_close, 409 .ndo_stop = tun_net_close,
311 .ndo_start_xmit = tun_net_xmit, 410 .ndo_start_xmit = tun_net_xmit,
@@ -313,6 +412,7 @@ static const struct net_device_ops tun_netdev_ops = {
313}; 412};
314 413
315static const struct net_device_ops tap_netdev_ops = { 414static const struct net_device_ops tap_netdev_ops = {
415 .ndo_uninit = tun_net_uninit,
316 .ndo_open = tun_net_open, 416 .ndo_open = tun_net_open,
317 .ndo_stop = tun_net_close, 417 .ndo_stop = tun_net_close,
318 .ndo_start_xmit = tun_net_xmit, 418 .ndo_start_xmit = tun_net_xmit,
@@ -359,19 +459,24 @@ static void tun_net_init(struct net_device *dev)
359/* Poll */ 459/* Poll */
360static unsigned int tun_chr_poll(struct file *file, poll_table * wait) 460static unsigned int tun_chr_poll(struct file *file, poll_table * wait)
361{ 461{
362 struct tun_struct *tun = file->private_data; 462 struct tun_file *tfile = file->private_data;
463 struct tun_struct *tun = __tun_get(tfile);
363 unsigned int mask = POLLOUT | POLLWRNORM; 464 unsigned int mask = POLLOUT | POLLWRNORM;
364 465
365 if (!tun) 466 if (!tun)
366 return -EBADFD; 467 return POLLERR;
367 468
368 DBG(KERN_INFO "%s: tun_chr_poll\n", tun->dev->name); 469 DBG(KERN_INFO "%s: tun_chr_poll\n", tun->dev->name);
369 470
370 poll_wait(file, &tun->read_wait, wait); 471 poll_wait(file, &tfile->read_wait, wait);
371 472
372 if (!skb_queue_empty(&tun->readq)) 473 if (!skb_queue_empty(&tun->readq))
373 mask |= POLLIN | POLLRDNORM; 474 mask |= POLLIN | POLLRDNORM;
374 475
476 if (tun->dev->reg_state != NETREG_REGISTERED)
477 mask = POLLERR;
478
479 tun_put(tun);
375 return mask; 480 return mask;
376} 481}
377 482
@@ -556,14 +661,18 @@ static __inline__ ssize_t tun_get_user(struct tun_struct *tun, struct iovec *iv,
556static ssize_t tun_chr_aio_write(struct kiocb *iocb, const struct iovec *iv, 661static ssize_t tun_chr_aio_write(struct kiocb *iocb, const struct iovec *iv,
557 unsigned long count, loff_t pos) 662 unsigned long count, loff_t pos)
558{ 663{
559 struct tun_struct *tun = iocb->ki_filp->private_data; 664 struct tun_struct *tun = tun_get(iocb->ki_filp);
665 ssize_t result;
560 666
561 if (!tun) 667 if (!tun)
562 return -EBADFD; 668 return -EBADFD;
563 669
564 DBG(KERN_INFO "%s: tun_chr_write %ld\n", tun->dev->name, count); 670 DBG(KERN_INFO "%s: tun_chr_write %ld\n", tun->dev->name, count);
565 671
566 return tun_get_user(tun, (struct iovec *) iv, iov_length(iv, count)); 672 result = tun_get_user(tun, (struct iovec *) iv, iov_length(iv, count));
673
674 tun_put(tun);
675 return result;
567} 676}
568 677
569/* Put packet to the user space buffer */ 678/* Put packet to the user space buffer */
@@ -636,7 +745,8 @@ static ssize_t tun_chr_aio_read(struct kiocb *iocb, const struct iovec *iv,
636 unsigned long count, loff_t pos) 745 unsigned long count, loff_t pos)
637{ 746{
638 struct file *file = iocb->ki_filp; 747 struct file *file = iocb->ki_filp;
639 struct tun_struct *tun = file->private_data; 748 struct tun_file *tfile = file->private_data;
749 struct tun_struct *tun = __tun_get(tfile);
640 DECLARE_WAITQUEUE(wait, current); 750 DECLARE_WAITQUEUE(wait, current);
641 struct sk_buff *skb; 751 struct sk_buff *skb;
642 ssize_t len, ret = 0; 752 ssize_t len, ret = 0;
@@ -647,10 +757,12 @@ static ssize_t tun_chr_aio_read(struct kiocb *iocb, const struct iovec *iv,
647 DBG(KERN_INFO "%s: tun_chr_read\n", tun->dev->name); 757 DBG(KERN_INFO "%s: tun_chr_read\n", tun->dev->name);
648 758
649 len = iov_length(iv, count); 759 len = iov_length(iv, count);
650 if (len < 0) 760 if (len < 0) {
651 return -EINVAL; 761 ret = -EINVAL;
762 goto out;
763 }
652 764
653 add_wait_queue(&tun->read_wait, &wait); 765 add_wait_queue(&tfile->read_wait, &wait);
654 while (len) { 766 while (len) {
655 current->state = TASK_INTERRUPTIBLE; 767 current->state = TASK_INTERRUPTIBLE;
656 768
@@ -664,6 +776,10 @@ static ssize_t tun_chr_aio_read(struct kiocb *iocb, const struct iovec *iv,
664 ret = -ERESTARTSYS; 776 ret = -ERESTARTSYS;
665 break; 777 break;
666 } 778 }
779 if (tun->dev->reg_state != NETREG_REGISTERED) {
780 ret = -EIO;
781 break;
782 }
667 783
668 /* Nothing to read, let's sleep */ 784 /* Nothing to read, let's sleep */
669 schedule(); 785 schedule();
@@ -677,8 +793,10 @@ static ssize_t tun_chr_aio_read(struct kiocb *iocb, const struct iovec *iv,
677 } 793 }
678 794
679 current->state = TASK_RUNNING; 795 current->state = TASK_RUNNING;
680 remove_wait_queue(&tun->read_wait, &wait); 796 remove_wait_queue(&tfile->read_wait, &wait);
681 797
798out:
799 tun_put(tun);
682 return ret; 800 return ret;
683} 801}
684 802
@@ -687,54 +805,49 @@ static void tun_setup(struct net_device *dev)
687 struct tun_struct *tun = netdev_priv(dev); 805 struct tun_struct *tun = netdev_priv(dev);
688 806
689 skb_queue_head_init(&tun->readq); 807 skb_queue_head_init(&tun->readq);
690 init_waitqueue_head(&tun->read_wait);
691 808
692 tun->owner = -1; 809 tun->owner = -1;
693 tun->group = -1; 810 tun->group = -1;
694 811
695 dev->ethtool_ops = &tun_ethtool_ops; 812 dev->ethtool_ops = &tun_ethtool_ops;
696 dev->destructor = free_netdev; 813 dev->destructor = free_netdev;
697 dev->features |= NETIF_F_NETNS_LOCAL;
698} 814}
699 815
700static struct tun_struct *tun_get_by_name(struct tun_net *tn, const char *name) 816/* Trivial set of netlink ops to allow deleting tun or tap
817 * device with netlink.
818 */
819static int tun_validate(struct nlattr *tb[], struct nlattr *data[])
701{ 820{
702 struct tun_struct *tun; 821 return -EINVAL;
822}
703 823
704 ASSERT_RTNL(); 824static struct rtnl_link_ops tun_link_ops __read_mostly = {
705 list_for_each_entry(tun, &tn->dev_list, list) { 825 .kind = DRV_NAME,
706 if (!strncmp(tun->dev->name, name, IFNAMSIZ)) 826 .priv_size = sizeof(struct tun_struct),
707 return tun; 827 .setup = tun_setup,
708 } 828 .validate = tun_validate,
829};
709 830
710 return NULL;
711}
712 831
713static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr) 832static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
714{ 833{
715 struct tun_net *tn;
716 struct tun_struct *tun; 834 struct tun_struct *tun;
717 struct net_device *dev; 835 struct net_device *dev;
718 const struct cred *cred = current_cred();
719 int err; 836 int err;
720 837
721 tn = net_generic(net, tun_net_id); 838 dev = __dev_get_by_name(net, ifr->ifr_name);
722 tun = tun_get_by_name(tn, ifr->ifr_name); 839 if (dev) {
723 if (tun) { 840 if ((ifr->ifr_flags & IFF_TUN) && dev->netdev_ops == &tun_netdev_ops)
724 if (tun->attached) 841 tun = netdev_priv(dev);
725 return -EBUSY; 842 else if ((ifr->ifr_flags & IFF_TAP) && dev->netdev_ops == &tap_netdev_ops)
726 843 tun = netdev_priv(dev);
727 /* Check permissions */ 844 else
728 if (((tun->owner != -1 && 845 return -EINVAL;
729 cred->euid != tun->owner) || 846
730 (tun->group != -1 && 847 err = tun_attach(tun, file);
731 cred->egid != tun->group)) && 848 if (err < 0)
732 !capable(CAP_NET_ADMIN)) { 849 return err;
733 return -EPERM;
734 }
735 } 850 }
736 else if (__dev_get_by_name(net, ifr->ifr_name))
737 return -EINVAL;
738 else { 851 else {
739 char *name; 852 char *name;
740 unsigned long flags = 0; 853 unsigned long flags = 0;
@@ -765,6 +878,7 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
765 return -ENOMEM; 878 return -ENOMEM;
766 879
767 dev_net_set(dev, net); 880 dev_net_set(dev, net);
881 dev->rtnl_link_ops = &tun_link_ops;
768 882
769 tun = netdev_priv(dev); 883 tun = netdev_priv(dev);
770 tun->dev = dev; 884 tun->dev = dev;
@@ -783,7 +897,9 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
783 if (err < 0) 897 if (err < 0)
784 goto err_free_dev; 898 goto err_free_dev;
785 899
786 list_add(&tun->list, &tn->dev_list); 900 err = tun_attach(tun, file);
901 if (err < 0)
902 goto err_free_dev;
787 } 903 }
788 904
789 DBG(KERN_INFO "%s: tun_set_iff\n", tun->dev->name); 905 DBG(KERN_INFO "%s: tun_set_iff\n", tun->dev->name);
@@ -803,10 +919,6 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
803 else 919 else
804 tun->flags &= ~TUN_VNET_HDR; 920 tun->flags &= ~TUN_VNET_HDR;
805 921
806 file->private_data = tun;
807 tun->attached = 1;
808 get_net(dev_net(tun->dev));
809
810 /* Make sure persistent devices do not get stuck in 922 /* Make sure persistent devices do not get stuck in
811 * xoff state. 923 * xoff state.
812 */ 924 */
@@ -824,7 +936,7 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
824 936
825static int tun_get_iff(struct net *net, struct file *file, struct ifreq *ifr) 937static int tun_get_iff(struct net *net, struct file *file, struct ifreq *ifr)
826{ 938{
827 struct tun_struct *tun = file->private_data; 939 struct tun_struct *tun = tun_get(file);
828 940
829 if (!tun) 941 if (!tun)
830 return -EBADFD; 942 return -EBADFD;
@@ -849,6 +961,7 @@ static int tun_get_iff(struct net *net, struct file *file, struct ifreq *ifr)
849 if (tun->flags & TUN_VNET_HDR) 961 if (tun->flags & TUN_VNET_HDR)
850 ifr->ifr_flags |= IFF_VNET_HDR; 962 ifr->ifr_flags |= IFF_VNET_HDR;
851 963
964 tun_put(tun);
852 return 0; 965 return 0;
853} 966}
854 967
@@ -895,7 +1008,8 @@ static int set_offload(struct net_device *dev, unsigned long arg)
895static int tun_chr_ioctl(struct inode *inode, struct file *file, 1008static int tun_chr_ioctl(struct inode *inode, struct file *file,
896 unsigned int cmd, unsigned long arg) 1009 unsigned int cmd, unsigned long arg)
897{ 1010{
898 struct tun_struct *tun = file->private_data; 1011 struct tun_file *tfile = file->private_data;
1012 struct tun_struct *tun;
899 void __user* argp = (void __user*)arg; 1013 void __user* argp = (void __user*)arg;
900 struct ifreq ifr; 1014 struct ifreq ifr;
901 int ret; 1015 int ret;
@@ -904,13 +1018,23 @@ static int tun_chr_ioctl(struct inode *inode, struct file *file,
904 if (copy_from_user(&ifr, argp, sizeof ifr)) 1018 if (copy_from_user(&ifr, argp, sizeof ifr))
905 return -EFAULT; 1019 return -EFAULT;
906 1020
1021 if (cmd == TUNGETFEATURES) {
1022 /* Currently this just means: "what IFF flags are valid?".
1023 * This is needed because we never checked for invalid flags on
1024 * TUNSETIFF. */
1025 return put_user(IFF_TUN | IFF_TAP | IFF_NO_PI | IFF_ONE_QUEUE |
1026 IFF_VNET_HDR,
1027 (unsigned int __user*)argp);
1028 }
1029
1030 tun = __tun_get(tfile);
907 if (cmd == TUNSETIFF && !tun) { 1031 if (cmd == TUNSETIFF && !tun) {
908 int err; 1032 int err;
909 1033
910 ifr.ifr_name[IFNAMSIZ-1] = '\0'; 1034 ifr.ifr_name[IFNAMSIZ-1] = '\0';
911 1035
912 rtnl_lock(); 1036 rtnl_lock();
913 err = tun_set_iff(current->nsproxy->net_ns, file, &ifr); 1037 err = tun_set_iff(tfile->net, file, &ifr);
914 rtnl_unlock(); 1038 rtnl_unlock();
915 1039
916 if (err) 1040 if (err)
@@ -921,28 +1045,21 @@ static int tun_chr_ioctl(struct inode *inode, struct file *file,
921 return 0; 1045 return 0;
922 } 1046 }
923 1047
924 if (cmd == TUNGETFEATURES) {
925 /* Currently this just means: "what IFF flags are valid?".
926 * This is needed because we never checked for invalid flags on
927 * TUNSETIFF. */
928 return put_user(IFF_TUN | IFF_TAP | IFF_NO_PI | IFF_ONE_QUEUE |
929 IFF_VNET_HDR,
930 (unsigned int __user*)argp);
931 }
932 1048
933 if (!tun) 1049 if (!tun)
934 return -EBADFD; 1050 return -EBADFD;
935 1051
936 DBG(KERN_INFO "%s: tun_chr_ioctl cmd %d\n", tun->dev->name, cmd); 1052 DBG(KERN_INFO "%s: tun_chr_ioctl cmd %d\n", tun->dev->name, cmd);
937 1053
1054 ret = 0;
938 switch (cmd) { 1055 switch (cmd) {
939 case TUNGETIFF: 1056 case TUNGETIFF:
940 ret = tun_get_iff(current->nsproxy->net_ns, file, &ifr); 1057 ret = tun_get_iff(current->nsproxy->net_ns, file, &ifr);
941 if (ret) 1058 if (ret)
942 return ret; 1059 break;
943 1060
944 if (copy_to_user(argp, &ifr, sizeof(ifr))) 1061 if (copy_to_user(argp, &ifr, sizeof(ifr)))
945 return -EFAULT; 1062 ret = -EFAULT;
946 break; 1063 break;
947 1064
948 case TUNSETNOCSUM: 1065 case TUNSETNOCSUM:
@@ -994,7 +1111,7 @@ static int tun_chr_ioctl(struct inode *inode, struct file *file,
994 ret = 0; 1111 ret = 0;
995 } 1112 }
996 rtnl_unlock(); 1113 rtnl_unlock();
997 return ret; 1114 break;
998 1115
999#ifdef TUN_DEBUG 1116#ifdef TUN_DEBUG
1000 case TUNSETDEBUG: 1117 case TUNSETDEBUG:
@@ -1005,24 +1122,25 @@ static int tun_chr_ioctl(struct inode *inode, struct file *file,
1005 rtnl_lock(); 1122 rtnl_lock();
1006 ret = set_offload(tun->dev, arg); 1123 ret = set_offload(tun->dev, arg);
1007 rtnl_unlock(); 1124 rtnl_unlock();
1008 return ret; 1125 break;
1009 1126
1010 case TUNSETTXFILTER: 1127 case TUNSETTXFILTER:
1011 /* Can be set only for TAPs */ 1128 /* Can be set only for TAPs */
1129 ret = -EINVAL;
1012 if ((tun->flags & TUN_TYPE_MASK) != TUN_TAP_DEV) 1130 if ((tun->flags & TUN_TYPE_MASK) != TUN_TAP_DEV)
1013 return -EINVAL; 1131 break;
1014 rtnl_lock(); 1132 rtnl_lock();
1015 ret = update_filter(&tun->txflt, (void __user *)arg); 1133 ret = update_filter(&tun->txflt, (void __user *)arg);
1016 rtnl_unlock(); 1134 rtnl_unlock();
1017 return ret; 1135 break;
1018 1136
1019 case SIOCGIFHWADDR: 1137 case SIOCGIFHWADDR:
1020 /* Get hw addres */ 1138 /* Get hw addres */
1021 memcpy(ifr.ifr_hwaddr.sa_data, tun->dev->dev_addr, ETH_ALEN); 1139 memcpy(ifr.ifr_hwaddr.sa_data, tun->dev->dev_addr, ETH_ALEN);
1022 ifr.ifr_hwaddr.sa_family = tun->dev->type; 1140 ifr.ifr_hwaddr.sa_family = tun->dev->type;
1023 if (copy_to_user(argp, &ifr, sizeof ifr)) 1141 if (copy_to_user(argp, &ifr, sizeof ifr))
1024 return -EFAULT; 1142 ret = -EFAULT;
1025 return 0; 1143 break;
1026 1144
1027 case SIOCSIFHWADDR: 1145 case SIOCSIFHWADDR:
1028 /* Set hw address */ 1146 /* Set hw address */
@@ -1032,18 +1150,19 @@ static int tun_chr_ioctl(struct inode *inode, struct file *file,
1032 rtnl_lock(); 1150 rtnl_lock();
1033 ret = dev_set_mac_address(tun->dev, &ifr.ifr_hwaddr); 1151 ret = dev_set_mac_address(tun->dev, &ifr.ifr_hwaddr);
1034 rtnl_unlock(); 1152 rtnl_unlock();
1035 return ret; 1153 break;
1036
1037 default: 1154 default:
1038 return -EINVAL; 1155 ret = -EINVAL;
1156 break;
1039 }; 1157 };
1040 1158
1041 return 0; 1159 tun_put(tun);
1160 return ret;
1042} 1161}
1043 1162
1044static int tun_chr_fasync(int fd, struct file *file, int on) 1163static int tun_chr_fasync(int fd, struct file *file, int on)
1045{ 1164{
1046 struct tun_struct *tun = file->private_data; 1165 struct tun_struct *tun = tun_get(file);
1047 int ret; 1166 int ret;
1048 1167
1049 if (!tun) 1168 if (!tun)
@@ -1065,42 +1184,48 @@ static int tun_chr_fasync(int fd, struct file *file, int on)
1065 ret = 0; 1184 ret = 0;
1066out: 1185out:
1067 unlock_kernel(); 1186 unlock_kernel();
1187 tun_put(tun);
1068 return ret; 1188 return ret;
1069} 1189}
1070 1190
1071static int tun_chr_open(struct inode *inode, struct file * file) 1191static int tun_chr_open(struct inode *inode, struct file * file)
1072{ 1192{
1193 struct tun_file *tfile;
1073 cycle_kernel_lock(); 1194 cycle_kernel_lock();
1074 DBG1(KERN_INFO "tunX: tun_chr_open\n"); 1195 DBG1(KERN_INFO "tunX: tun_chr_open\n");
1075 file->private_data = NULL; 1196
1197 tfile = kmalloc(sizeof(*tfile), GFP_KERNEL);
1198 if (!tfile)
1199 return -ENOMEM;
1200 atomic_set(&tfile->count, 0);
1201 tfile->tun = NULL;
1202 tfile->net = get_net(current->nsproxy->net_ns);
1203 init_waitqueue_head(&tfile->read_wait);
1204 file->private_data = tfile;
1076 return 0; 1205 return 0;
1077} 1206}
1078 1207
1079static int tun_chr_close(struct inode *inode, struct file *file) 1208static int tun_chr_close(struct inode *inode, struct file *file)
1080{ 1209{
1081 struct tun_struct *tun = file->private_data; 1210 struct tun_file *tfile = file->private_data;
1211 struct tun_struct *tun = __tun_get(tfile);
1082 1212
1083 if (!tun)
1084 return 0;
1085
1086 DBG(KERN_INFO "%s: tun_chr_close\n", tun->dev->name);
1087 1213
1088 rtnl_lock(); 1214 if (tun) {
1215 DBG(KERN_INFO "%s: tun_chr_close\n", tun->dev->name);
1089 1216
1090 /* Detach from net device */ 1217 rtnl_lock();
1091 file->private_data = NULL; 1218 __tun_detach(tun);
1092 tun->attached = 0;
1093 put_net(dev_net(tun->dev));
1094 1219
1095 /* Drop read queue */ 1220 /* If desireable, unregister the netdevice. */
1096 skb_queue_purge(&tun->readq); 1221 if (!(tun->flags & TUN_PERSIST))
1222 unregister_netdevice(tun->dev);
1097 1223
1098 if (!(tun->flags & TUN_PERSIST)) { 1224 rtnl_unlock();
1099 list_del(&tun->list);
1100 unregister_netdevice(tun->dev);
1101 } 1225 }
1102 1226
1103 rtnl_unlock(); 1227 put_net(tfile->net);
1228 kfree(tfile);
1104 1229
1105 return 0; 1230 return 0;
1106} 1231}
@@ -1181,7 +1306,7 @@ static void tun_set_msglevel(struct net_device *dev, u32 value)
1181static u32 tun_get_link(struct net_device *dev) 1306static u32 tun_get_link(struct net_device *dev)
1182{ 1307{
1183 struct tun_struct *tun = netdev_priv(dev); 1308 struct tun_struct *tun = netdev_priv(dev);
1184 return tun->attached; 1309 return !!tun->tfile;
1185} 1310}
1186 1311
1187static u32 tun_get_rx_csum(struct net_device *dev) 1312static u32 tun_get_rx_csum(struct net_device *dev)
@@ -1210,45 +1335,6 @@ static const struct ethtool_ops tun_ethtool_ops = {
1210 .set_rx_csum = tun_set_rx_csum 1335 .set_rx_csum = tun_set_rx_csum
1211}; 1336};
1212 1337
1213static int tun_init_net(struct net *net)
1214{
1215 struct tun_net *tn;
1216
1217 tn = kmalloc(sizeof(*tn), GFP_KERNEL);
1218 if (tn == NULL)
1219 return -ENOMEM;
1220
1221 INIT_LIST_HEAD(&tn->dev_list);
1222
1223 if (net_assign_generic(net, tun_net_id, tn)) {
1224 kfree(tn);
1225 return -ENOMEM;
1226 }
1227
1228 return 0;
1229}
1230
1231static void tun_exit_net(struct net *net)
1232{
1233 struct tun_net *tn;
1234 struct tun_struct *tun, *nxt;
1235
1236 tn = net_generic(net, tun_net_id);
1237
1238 rtnl_lock();
1239 list_for_each_entry_safe(tun, nxt, &tn->dev_list, list) {
1240 DBG(KERN_INFO "%s cleaned up\n", tun->dev->name);
1241 unregister_netdevice(tun->dev);
1242 }
1243 rtnl_unlock();
1244
1245 kfree(tn);
1246}
1247
1248static struct pernet_operations tun_net_ops = {
1249 .init = tun_init_net,
1250 .exit = tun_exit_net,
1251};
1252 1338
1253static int __init tun_init(void) 1339static int __init tun_init(void)
1254{ 1340{
@@ -1257,10 +1343,10 @@ static int __init tun_init(void)
1257 printk(KERN_INFO "tun: %s, %s\n", DRV_DESCRIPTION, DRV_VERSION); 1343 printk(KERN_INFO "tun: %s, %s\n", DRV_DESCRIPTION, DRV_VERSION);
1258 printk(KERN_INFO "tun: %s\n", DRV_COPYRIGHT); 1344 printk(KERN_INFO "tun: %s\n", DRV_COPYRIGHT);
1259 1345
1260 ret = register_pernet_gen_device(&tun_net_id, &tun_net_ops); 1346 ret = rtnl_link_register(&tun_link_ops);
1261 if (ret) { 1347 if (ret) {
1262 printk(KERN_ERR "tun: Can't register pernet ops\n"); 1348 printk(KERN_ERR "tun: Can't register link_ops\n");
1263 goto err_pernet; 1349 goto err_linkops;
1264 } 1350 }
1265 1351
1266 ret = misc_register(&tun_miscdev); 1352 ret = misc_register(&tun_miscdev);
@@ -1268,18 +1354,17 @@ static int __init tun_init(void)
1268 printk(KERN_ERR "tun: Can't register misc device %d\n", TUN_MINOR); 1354 printk(KERN_ERR "tun: Can't register misc device %d\n", TUN_MINOR);
1269 goto err_misc; 1355 goto err_misc;
1270 } 1356 }
1271 return 0; 1357 return 0;
1272
1273err_misc: 1358err_misc:
1274 unregister_pernet_gen_device(tun_net_id, &tun_net_ops); 1359 rtnl_link_unregister(&tun_link_ops);
1275err_pernet: 1360err_linkops:
1276 return ret; 1361 return ret;
1277} 1362}
1278 1363
1279static void tun_cleanup(void) 1364static void tun_cleanup(void)
1280{ 1365{
1281 misc_deregister(&tun_miscdev); 1366 misc_deregister(&tun_miscdev);
1282 unregister_pernet_gen_device(tun_net_id, &tun_net_ops); 1367 rtnl_link_unregister(&tun_link_ops);
1283} 1368}
1284 1369
1285module_init(tun_init); 1370module_init(tun_init);
diff --git a/drivers/net/typhoon.c b/drivers/net/typhoon.c
index 3af9a9516ccb..a8e5651f3165 100644
--- a/drivers/net/typhoon.c
+++ b/drivers/net/typhoon.c
@@ -1783,7 +1783,7 @@ typhoon_poll(struct napi_struct *napi, int budget)
1783 } 1783 }
1784 1784
1785 if (work_done < budget) { 1785 if (work_done < budget) {
1786 netif_rx_complete(napi); 1786 napi_complete(napi);
1787 iowrite32(TYPHOON_INTR_NONE, 1787 iowrite32(TYPHOON_INTR_NONE,
1788 tp->ioaddr + TYPHOON_REG_INTR_MASK); 1788 tp->ioaddr + TYPHOON_REG_INTR_MASK);
1789 typhoon_post_pci_writes(tp->ioaddr); 1789 typhoon_post_pci_writes(tp->ioaddr);
@@ -1806,10 +1806,10 @@ typhoon_interrupt(int irq, void *dev_instance)
1806 1806
1807 iowrite32(intr_status, ioaddr + TYPHOON_REG_INTR_STATUS); 1807 iowrite32(intr_status, ioaddr + TYPHOON_REG_INTR_STATUS);
1808 1808
1809 if (netif_rx_schedule_prep(&tp->napi)) { 1809 if (napi_schedule_prep(&tp->napi)) {
1810 iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_MASK); 1810 iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_MASK);
1811 typhoon_post_pci_writes(ioaddr); 1811 typhoon_post_pci_writes(ioaddr);
1812 __netif_rx_schedule(&tp->napi); 1812 __napi_schedule(&tp->napi);
1813 } else { 1813 } else {
1814 printk(KERN_ERR "%s: Error, poll already scheduled\n", 1814 printk(KERN_ERR "%s: Error, poll already scheduled\n",
1815 dev->name); 1815 dev->name);
@@ -1944,7 +1944,7 @@ typhoon_start_runtime(struct typhoon *tp)
1944 goto error_out; 1944 goto error_out;
1945 1945
1946 INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_VLAN_TYPE_WRITE); 1946 INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_VLAN_TYPE_WRITE);
1947 xp_cmd.parm1 = __constant_cpu_to_le16(ETH_P_8021Q); 1947 xp_cmd.parm1 = cpu_to_le16(ETH_P_8021Q);
1948 err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL); 1948 err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
1949 if(err < 0) 1949 if(err < 0)
1950 goto error_out; 1950 goto error_out;
diff --git a/drivers/net/typhoon.h b/drivers/net/typhoon.h
index dd7022ca7354..673fd5125914 100644
--- a/drivers/net/typhoon.h
+++ b/drivers/net/typhoon.h
@@ -174,18 +174,18 @@ struct tx_desc {
174 u64 tx_addr; /* opaque for hardware, for TX_DESC */ 174 u64 tx_addr; /* opaque for hardware, for TX_DESC */
175 }; 175 };
176 __le32 processFlags; 176 __le32 processFlags;
177#define TYPHOON_TX_PF_NO_CRC __constant_cpu_to_le32(0x00000001) 177#define TYPHOON_TX_PF_NO_CRC cpu_to_le32(0x00000001)
178#define TYPHOON_TX_PF_IP_CHKSUM __constant_cpu_to_le32(0x00000002) 178#define TYPHOON_TX_PF_IP_CHKSUM cpu_to_le32(0x00000002)
179#define TYPHOON_TX_PF_TCP_CHKSUM __constant_cpu_to_le32(0x00000004) 179#define TYPHOON_TX_PF_TCP_CHKSUM cpu_to_le32(0x00000004)
180#define TYPHOON_TX_PF_TCP_SEGMENT __constant_cpu_to_le32(0x00000008) 180#define TYPHOON_TX_PF_TCP_SEGMENT cpu_to_le32(0x00000008)
181#define TYPHOON_TX_PF_INSERT_VLAN __constant_cpu_to_le32(0x00000010) 181#define TYPHOON_TX_PF_INSERT_VLAN cpu_to_le32(0x00000010)
182#define TYPHOON_TX_PF_IPSEC __constant_cpu_to_le32(0x00000020) 182#define TYPHOON_TX_PF_IPSEC cpu_to_le32(0x00000020)
183#define TYPHOON_TX_PF_VLAN_PRIORITY __constant_cpu_to_le32(0x00000040) 183#define TYPHOON_TX_PF_VLAN_PRIORITY cpu_to_le32(0x00000040)
184#define TYPHOON_TX_PF_UDP_CHKSUM __constant_cpu_to_le32(0x00000080) 184#define TYPHOON_TX_PF_UDP_CHKSUM cpu_to_le32(0x00000080)
185#define TYPHOON_TX_PF_PAD_FRAME __constant_cpu_to_le32(0x00000100) 185#define TYPHOON_TX_PF_PAD_FRAME cpu_to_le32(0x00000100)
186#define TYPHOON_TX_PF_RESERVED __constant_cpu_to_le32(0x00000e00) 186#define TYPHOON_TX_PF_RESERVED cpu_to_le32(0x00000e00)
187#define TYPHOON_TX_PF_VLAN_MASK __constant_cpu_to_le32(0x0ffff000) 187#define TYPHOON_TX_PF_VLAN_MASK cpu_to_le32(0x0ffff000)
188#define TYPHOON_TX_PF_INTERNAL __constant_cpu_to_le32(0xf0000000) 188#define TYPHOON_TX_PF_INTERNAL cpu_to_le32(0xf0000000)
189#define TYPHOON_TX_PF_VLAN_TAG_SHIFT 12 189#define TYPHOON_TX_PF_VLAN_TAG_SHIFT 12
190} __attribute__ ((packed)); 190} __attribute__ ((packed));
191 191
@@ -203,8 +203,8 @@ struct tcpopt_desc {
203 u8 flags; 203 u8 flags;
204 u8 numDesc; 204 u8 numDesc;
205 __le16 mss_flags; 205 __le16 mss_flags;
206#define TYPHOON_TSO_FIRST __constant_cpu_to_le16(0x1000) 206#define TYPHOON_TSO_FIRST cpu_to_le16(0x1000)
207#define TYPHOON_TSO_LAST __constant_cpu_to_le16(0x2000) 207#define TYPHOON_TSO_LAST cpu_to_le16(0x2000)
208 __le32 respAddrLo; 208 __le32 respAddrLo;
209 __le32 bytesTx; 209 __le32 bytesTx;
210 __le32 status; 210 __le32 status;
@@ -222,8 +222,8 @@ struct ipsec_desc {
222 u8 flags; 222 u8 flags;
223 u8 numDesc; 223 u8 numDesc;
224 __le16 ipsecFlags; 224 __le16 ipsecFlags;
225#define TYPHOON_IPSEC_GEN_IV __constant_cpu_to_le16(0x0000) 225#define TYPHOON_IPSEC_GEN_IV cpu_to_le16(0x0000)
226#define TYPHOON_IPSEC_USE_IV __constant_cpu_to_le16(0x0001) 226#define TYPHOON_IPSEC_USE_IV cpu_to_le16(0x0001)
227 __le32 sa1; 227 __le32 sa1;
228 __le32 sa2; 228 __le32 sa2;
229 __le32 reserved; 229 __le32 reserved;
@@ -248,41 +248,41 @@ struct rx_desc {
248 u32 addr; /* opaque, comes from virtAddr */ 248 u32 addr; /* opaque, comes from virtAddr */
249 u32 addrHi; /* opaque, comes from virtAddrHi */ 249 u32 addrHi; /* opaque, comes from virtAddrHi */
250 __le32 rxStatus; 250 __le32 rxStatus;
251#define TYPHOON_RX_ERR_INTERNAL __constant_cpu_to_le32(0x00000000) 251#define TYPHOON_RX_ERR_INTERNAL cpu_to_le32(0x00000000)
252#define TYPHOON_RX_ERR_FIFO_UNDERRUN __constant_cpu_to_le32(0x00000001) 252#define TYPHOON_RX_ERR_FIFO_UNDERRUN cpu_to_le32(0x00000001)
253#define TYPHOON_RX_ERR_BAD_SSD __constant_cpu_to_le32(0x00000002) 253#define TYPHOON_RX_ERR_BAD_SSD cpu_to_le32(0x00000002)
254#define TYPHOON_RX_ERR_RUNT __constant_cpu_to_le32(0x00000003) 254#define TYPHOON_RX_ERR_RUNT cpu_to_le32(0x00000003)
255#define TYPHOON_RX_ERR_CRC __constant_cpu_to_le32(0x00000004) 255#define TYPHOON_RX_ERR_CRC cpu_to_le32(0x00000004)
256#define TYPHOON_RX_ERR_OVERSIZE __constant_cpu_to_le32(0x00000005) 256#define TYPHOON_RX_ERR_OVERSIZE cpu_to_le32(0x00000005)
257#define TYPHOON_RX_ERR_ALIGN __constant_cpu_to_le32(0x00000006) 257#define TYPHOON_RX_ERR_ALIGN cpu_to_le32(0x00000006)
258#define TYPHOON_RX_ERR_DRIBBLE __constant_cpu_to_le32(0x00000007) 258#define TYPHOON_RX_ERR_DRIBBLE cpu_to_le32(0x00000007)
259#define TYPHOON_RX_PROTO_MASK __constant_cpu_to_le32(0x00000003) 259#define TYPHOON_RX_PROTO_MASK cpu_to_le32(0x00000003)
260#define TYPHOON_RX_PROTO_UNKNOWN __constant_cpu_to_le32(0x00000000) 260#define TYPHOON_RX_PROTO_UNKNOWN cpu_to_le32(0x00000000)
261#define TYPHOON_RX_PROTO_IP __constant_cpu_to_le32(0x00000001) 261#define TYPHOON_RX_PROTO_IP cpu_to_le32(0x00000001)
262#define TYPHOON_RX_PROTO_IPX __constant_cpu_to_le32(0x00000002) 262#define TYPHOON_RX_PROTO_IPX cpu_to_le32(0x00000002)
263#define TYPHOON_RX_VLAN __constant_cpu_to_le32(0x00000004) 263#define TYPHOON_RX_VLAN cpu_to_le32(0x00000004)
264#define TYPHOON_RX_IP_FRAG __constant_cpu_to_le32(0x00000008) 264#define TYPHOON_RX_IP_FRAG cpu_to_le32(0x00000008)
265#define TYPHOON_RX_IPSEC __constant_cpu_to_le32(0x00000010) 265#define TYPHOON_RX_IPSEC cpu_to_le32(0x00000010)
266#define TYPHOON_RX_IP_CHK_FAIL __constant_cpu_to_le32(0x00000020) 266#define TYPHOON_RX_IP_CHK_FAIL cpu_to_le32(0x00000020)
267#define TYPHOON_RX_TCP_CHK_FAIL __constant_cpu_to_le32(0x00000040) 267#define TYPHOON_RX_TCP_CHK_FAIL cpu_to_le32(0x00000040)
268#define TYPHOON_RX_UDP_CHK_FAIL __constant_cpu_to_le32(0x00000080) 268#define TYPHOON_RX_UDP_CHK_FAIL cpu_to_le32(0x00000080)
269#define TYPHOON_RX_IP_CHK_GOOD __constant_cpu_to_le32(0x00000100) 269#define TYPHOON_RX_IP_CHK_GOOD cpu_to_le32(0x00000100)
270#define TYPHOON_RX_TCP_CHK_GOOD __constant_cpu_to_le32(0x00000200) 270#define TYPHOON_RX_TCP_CHK_GOOD cpu_to_le32(0x00000200)
271#define TYPHOON_RX_UDP_CHK_GOOD __constant_cpu_to_le32(0x00000400) 271#define TYPHOON_RX_UDP_CHK_GOOD cpu_to_le32(0x00000400)
272 __le16 filterResults; 272 __le16 filterResults;
273#define TYPHOON_RX_FILTER_MASK __constant_cpu_to_le16(0x7fff) 273#define TYPHOON_RX_FILTER_MASK cpu_to_le16(0x7fff)
274#define TYPHOON_RX_FILTERED __constant_cpu_to_le16(0x8000) 274#define TYPHOON_RX_FILTERED cpu_to_le16(0x8000)
275 __le16 ipsecResults; 275 __le16 ipsecResults;
276#define TYPHOON_RX_OUTER_AH_GOOD __constant_cpu_to_le16(0x0001) 276#define TYPHOON_RX_OUTER_AH_GOOD cpu_to_le16(0x0001)
277#define TYPHOON_RX_OUTER_ESP_GOOD __constant_cpu_to_le16(0x0002) 277#define TYPHOON_RX_OUTER_ESP_GOOD cpu_to_le16(0x0002)
278#define TYPHOON_RX_INNER_AH_GOOD __constant_cpu_to_le16(0x0004) 278#define TYPHOON_RX_INNER_AH_GOOD cpu_to_le16(0x0004)
279#define TYPHOON_RX_INNER_ESP_GOOD __constant_cpu_to_le16(0x0008) 279#define TYPHOON_RX_INNER_ESP_GOOD cpu_to_le16(0x0008)
280#define TYPHOON_RX_OUTER_AH_FAIL __constant_cpu_to_le16(0x0010) 280#define TYPHOON_RX_OUTER_AH_FAIL cpu_to_le16(0x0010)
281#define TYPHOON_RX_OUTER_ESP_FAIL __constant_cpu_to_le16(0x0020) 281#define TYPHOON_RX_OUTER_ESP_FAIL cpu_to_le16(0x0020)
282#define TYPHOON_RX_INNER_AH_FAIL __constant_cpu_to_le16(0x0040) 282#define TYPHOON_RX_INNER_AH_FAIL cpu_to_le16(0x0040)
283#define TYPHOON_RX_INNER_ESP_FAIL __constant_cpu_to_le16(0x0080) 283#define TYPHOON_RX_INNER_ESP_FAIL cpu_to_le16(0x0080)
284#define TYPHOON_RX_UNKNOWN_SA __constant_cpu_to_le16(0x0100) 284#define TYPHOON_RX_UNKNOWN_SA cpu_to_le16(0x0100)
285#define TYPHOON_RX_ESP_FORMAT_ERR __constant_cpu_to_le16(0x0200) 285#define TYPHOON_RX_ESP_FORMAT_ERR cpu_to_le16(0x0200)
286 __be32 vlanTag; 286 __be32 vlanTag;
287} __attribute__ ((packed)); 287} __attribute__ ((packed));
288 288
@@ -318,31 +318,31 @@ struct cmd_desc {
318 u8 flags; 318 u8 flags;
319 u8 numDesc; 319 u8 numDesc;
320 __le16 cmd; 320 __le16 cmd;
321#define TYPHOON_CMD_TX_ENABLE __constant_cpu_to_le16(0x0001) 321#define TYPHOON_CMD_TX_ENABLE cpu_to_le16(0x0001)
322#define TYPHOON_CMD_TX_DISABLE __constant_cpu_to_le16(0x0002) 322#define TYPHOON_CMD_TX_DISABLE cpu_to_le16(0x0002)
323#define TYPHOON_CMD_RX_ENABLE __constant_cpu_to_le16(0x0003) 323#define TYPHOON_CMD_RX_ENABLE cpu_to_le16(0x0003)
324#define TYPHOON_CMD_RX_DISABLE __constant_cpu_to_le16(0x0004) 324#define TYPHOON_CMD_RX_DISABLE cpu_to_le16(0x0004)
325#define TYPHOON_CMD_SET_RX_FILTER __constant_cpu_to_le16(0x0005) 325#define TYPHOON_CMD_SET_RX_FILTER cpu_to_le16(0x0005)
326#define TYPHOON_CMD_READ_STATS __constant_cpu_to_le16(0x0007) 326#define TYPHOON_CMD_READ_STATS cpu_to_le16(0x0007)
327#define TYPHOON_CMD_XCVR_SELECT __constant_cpu_to_le16(0x0013) 327#define TYPHOON_CMD_XCVR_SELECT cpu_to_le16(0x0013)
328#define TYPHOON_CMD_SET_MAX_PKT_SIZE __constant_cpu_to_le16(0x001a) 328#define TYPHOON_CMD_SET_MAX_PKT_SIZE cpu_to_le16(0x001a)
329#define TYPHOON_CMD_READ_MEDIA_STATUS __constant_cpu_to_le16(0x001b) 329#define TYPHOON_CMD_READ_MEDIA_STATUS cpu_to_le16(0x001b)
330#define TYPHOON_CMD_GOTO_SLEEP __constant_cpu_to_le16(0x0023) 330#define TYPHOON_CMD_GOTO_SLEEP cpu_to_le16(0x0023)
331#define TYPHOON_CMD_SET_MULTICAST_HASH __constant_cpu_to_le16(0x0025) 331#define TYPHOON_CMD_SET_MULTICAST_HASH cpu_to_le16(0x0025)
332#define TYPHOON_CMD_SET_MAC_ADDRESS __constant_cpu_to_le16(0x0026) 332#define TYPHOON_CMD_SET_MAC_ADDRESS cpu_to_le16(0x0026)
333#define TYPHOON_CMD_READ_MAC_ADDRESS __constant_cpu_to_le16(0x0027) 333#define TYPHOON_CMD_READ_MAC_ADDRESS cpu_to_le16(0x0027)
334#define TYPHOON_CMD_VLAN_TYPE_WRITE __constant_cpu_to_le16(0x002b) 334#define TYPHOON_CMD_VLAN_TYPE_WRITE cpu_to_le16(0x002b)
335#define TYPHOON_CMD_CREATE_SA __constant_cpu_to_le16(0x0034) 335#define TYPHOON_CMD_CREATE_SA cpu_to_le16(0x0034)
336#define TYPHOON_CMD_DELETE_SA __constant_cpu_to_le16(0x0035) 336#define TYPHOON_CMD_DELETE_SA cpu_to_le16(0x0035)
337#define TYPHOON_CMD_READ_VERSIONS __constant_cpu_to_le16(0x0043) 337#define TYPHOON_CMD_READ_VERSIONS cpu_to_le16(0x0043)
338#define TYPHOON_CMD_IRQ_COALESCE_CTRL __constant_cpu_to_le16(0x0045) 338#define TYPHOON_CMD_IRQ_COALESCE_CTRL cpu_to_le16(0x0045)
339#define TYPHOON_CMD_ENABLE_WAKE_EVENTS __constant_cpu_to_le16(0x0049) 339#define TYPHOON_CMD_ENABLE_WAKE_EVENTS cpu_to_le16(0x0049)
340#define TYPHOON_CMD_SET_OFFLOAD_TASKS __constant_cpu_to_le16(0x004f) 340#define TYPHOON_CMD_SET_OFFLOAD_TASKS cpu_to_le16(0x004f)
341#define TYPHOON_CMD_HELLO_RESP __constant_cpu_to_le16(0x0057) 341#define TYPHOON_CMD_HELLO_RESP cpu_to_le16(0x0057)
342#define TYPHOON_CMD_HALT __constant_cpu_to_le16(0x005d) 342#define TYPHOON_CMD_HALT cpu_to_le16(0x005d)
343#define TYPHOON_CMD_READ_IPSEC_INFO __constant_cpu_to_le16(0x005e) 343#define TYPHOON_CMD_READ_IPSEC_INFO cpu_to_le16(0x005e)
344#define TYPHOON_CMD_GET_IPSEC_ENABLE __constant_cpu_to_le16(0x0067) 344#define TYPHOON_CMD_GET_IPSEC_ENABLE cpu_to_le16(0x0067)
345#define TYPHOON_CMD_GET_CMD_LVL __constant_cpu_to_le16(0x0069) 345#define TYPHOON_CMD_GET_CMD_LVL cpu_to_le16(0x0069)
346 u16 seqNo; 346 u16 seqNo;
347 __le16 parm1; 347 __le16 parm1;
348 __le32 parm2; 348 __le32 parm2;
@@ -380,11 +380,11 @@ struct resp_desc {
380 380
381/* TYPHOON_CMD_SET_RX_FILTER filter bits (cmd.parm1) 381/* TYPHOON_CMD_SET_RX_FILTER filter bits (cmd.parm1)
382 */ 382 */
383#define TYPHOON_RX_FILTER_DIRECTED __constant_cpu_to_le16(0x0001) 383#define TYPHOON_RX_FILTER_DIRECTED cpu_to_le16(0x0001)
384#define TYPHOON_RX_FILTER_ALL_MCAST __constant_cpu_to_le16(0x0002) 384#define TYPHOON_RX_FILTER_ALL_MCAST cpu_to_le16(0x0002)
385#define TYPHOON_RX_FILTER_BROADCAST __constant_cpu_to_le16(0x0004) 385#define TYPHOON_RX_FILTER_BROADCAST cpu_to_le16(0x0004)
386#define TYPHOON_RX_FILTER_PROMISCOUS __constant_cpu_to_le16(0x0008) 386#define TYPHOON_RX_FILTER_PROMISCOUS cpu_to_le16(0x0008)
387#define TYPHOON_RX_FILTER_MCAST_HASH __constant_cpu_to_le16(0x0010) 387#define TYPHOON_RX_FILTER_MCAST_HASH cpu_to_le16(0x0010)
388 388
389/* TYPHOON_CMD_READ_STATS response format 389/* TYPHOON_CMD_READ_STATS response format
390 */ 390 */
@@ -416,40 +416,40 @@ struct stats_resp {
416 __le32 rxOverflow; 416 __le32 rxOverflow;
417 __le32 rxFiltered; 417 __le32 rxFiltered;
418 __le32 linkStatus; 418 __le32 linkStatus;
419#define TYPHOON_LINK_STAT_MASK __constant_cpu_to_le32(0x00000001) 419#define TYPHOON_LINK_STAT_MASK cpu_to_le32(0x00000001)
420#define TYPHOON_LINK_GOOD __constant_cpu_to_le32(0x00000001) 420#define TYPHOON_LINK_GOOD cpu_to_le32(0x00000001)
421#define TYPHOON_LINK_BAD __constant_cpu_to_le32(0x00000000) 421#define TYPHOON_LINK_BAD cpu_to_le32(0x00000000)
422#define TYPHOON_LINK_SPEED_MASK __constant_cpu_to_le32(0x00000002) 422#define TYPHOON_LINK_SPEED_MASK cpu_to_le32(0x00000002)
423#define TYPHOON_LINK_100MBPS __constant_cpu_to_le32(0x00000002) 423#define TYPHOON_LINK_100MBPS cpu_to_le32(0x00000002)
424#define TYPHOON_LINK_10MBPS __constant_cpu_to_le32(0x00000000) 424#define TYPHOON_LINK_10MBPS cpu_to_le32(0x00000000)
425#define TYPHOON_LINK_DUPLEX_MASK __constant_cpu_to_le32(0x00000004) 425#define TYPHOON_LINK_DUPLEX_MASK cpu_to_le32(0x00000004)
426#define TYPHOON_LINK_FULL_DUPLEX __constant_cpu_to_le32(0x00000004) 426#define TYPHOON_LINK_FULL_DUPLEX cpu_to_le32(0x00000004)
427#define TYPHOON_LINK_HALF_DUPLEX __constant_cpu_to_le32(0x00000000) 427#define TYPHOON_LINK_HALF_DUPLEX cpu_to_le32(0x00000000)
428 __le32 unused2; 428 __le32 unused2;
429 __le32 unused3; 429 __le32 unused3;
430} __attribute__ ((packed)); 430} __attribute__ ((packed));
431 431
432/* TYPHOON_CMD_XCVR_SELECT xcvr values (resp.parm1) 432/* TYPHOON_CMD_XCVR_SELECT xcvr values (resp.parm1)
433 */ 433 */
434#define TYPHOON_XCVR_10HALF __constant_cpu_to_le16(0x0000) 434#define TYPHOON_XCVR_10HALF cpu_to_le16(0x0000)
435#define TYPHOON_XCVR_10FULL __constant_cpu_to_le16(0x0001) 435#define TYPHOON_XCVR_10FULL cpu_to_le16(0x0001)
436#define TYPHOON_XCVR_100HALF __constant_cpu_to_le16(0x0002) 436#define TYPHOON_XCVR_100HALF cpu_to_le16(0x0002)
437#define TYPHOON_XCVR_100FULL __constant_cpu_to_le16(0x0003) 437#define TYPHOON_XCVR_100FULL cpu_to_le16(0x0003)
438#define TYPHOON_XCVR_AUTONEG __constant_cpu_to_le16(0x0004) 438#define TYPHOON_XCVR_AUTONEG cpu_to_le16(0x0004)
439 439
440/* TYPHOON_CMD_READ_MEDIA_STATUS (resp.parm1) 440/* TYPHOON_CMD_READ_MEDIA_STATUS (resp.parm1)
441 */ 441 */
442#define TYPHOON_MEDIA_STAT_CRC_STRIP_DISABLE __constant_cpu_to_le16(0x0004) 442#define TYPHOON_MEDIA_STAT_CRC_STRIP_DISABLE cpu_to_le16(0x0004)
443#define TYPHOON_MEDIA_STAT_COLLISION_DETECT __constant_cpu_to_le16(0x0010) 443#define TYPHOON_MEDIA_STAT_COLLISION_DETECT cpu_to_le16(0x0010)
444#define TYPHOON_MEDIA_STAT_CARRIER_SENSE __constant_cpu_to_le16(0x0020) 444#define TYPHOON_MEDIA_STAT_CARRIER_SENSE cpu_to_le16(0x0020)
445#define TYPHOON_MEDIA_STAT_POLARITY_REV __constant_cpu_to_le16(0x0400) 445#define TYPHOON_MEDIA_STAT_POLARITY_REV cpu_to_le16(0x0400)
446#define TYPHOON_MEDIA_STAT_NO_LINK __constant_cpu_to_le16(0x0800) 446#define TYPHOON_MEDIA_STAT_NO_LINK cpu_to_le16(0x0800)
447 447
448/* TYPHOON_CMD_SET_MULTICAST_HASH enable values (cmd.parm1) 448/* TYPHOON_CMD_SET_MULTICAST_HASH enable values (cmd.parm1)
449 */ 449 */
450#define TYPHOON_MCAST_HASH_DISABLE __constant_cpu_to_le16(0x0000) 450#define TYPHOON_MCAST_HASH_DISABLE cpu_to_le16(0x0000)
451#define TYPHOON_MCAST_HASH_ENABLE __constant_cpu_to_le16(0x0001) 451#define TYPHOON_MCAST_HASH_ENABLE cpu_to_le16(0x0001)
452#define TYPHOON_MCAST_HASH_SET __constant_cpu_to_le16(0x0002) 452#define TYPHOON_MCAST_HASH_SET cpu_to_le16(0x0002)
453 453
454/* TYPHOON_CMD_CREATE_SA descriptor and settings 454/* TYPHOON_CMD_CREATE_SA descriptor and settings
455 */ 455 */
@@ -459,9 +459,9 @@ struct sa_descriptor {
459 u16 cmd; 459 u16 cmd;
460 u16 seqNo; 460 u16 seqNo;
461 u16 mode; 461 u16 mode;
462#define TYPHOON_SA_MODE_NULL __constant_cpu_to_le16(0x0000) 462#define TYPHOON_SA_MODE_NULL cpu_to_le16(0x0000)
463#define TYPHOON_SA_MODE_AH __constant_cpu_to_le16(0x0001) 463#define TYPHOON_SA_MODE_AH cpu_to_le16(0x0001)
464#define TYPHOON_SA_MODE_ESP __constant_cpu_to_le16(0x0002) 464#define TYPHOON_SA_MODE_ESP cpu_to_le16(0x0002)
465 u8 hashFlags; 465 u8 hashFlags;
466#define TYPHOON_SA_HASH_ENABLE 0x01 466#define TYPHOON_SA_HASH_ENABLE 0x01
467#define TYPHOON_SA_HASH_SHA1 0x02 467#define TYPHOON_SA_HASH_SHA1 0x02
@@ -493,22 +493,22 @@ struct sa_descriptor {
493/* TYPHOON_CMD_SET_OFFLOAD_TASKS bits (cmd.parm2 (Tx) & cmd.parm3 (Rx)) 493/* TYPHOON_CMD_SET_OFFLOAD_TASKS bits (cmd.parm2 (Tx) & cmd.parm3 (Rx))
494 * This is all for IPv4. 494 * This is all for IPv4.
495 */ 495 */
496#define TYPHOON_OFFLOAD_TCP_CHKSUM __constant_cpu_to_le32(0x00000002) 496#define TYPHOON_OFFLOAD_TCP_CHKSUM cpu_to_le32(0x00000002)
497#define TYPHOON_OFFLOAD_UDP_CHKSUM __constant_cpu_to_le32(0x00000004) 497#define TYPHOON_OFFLOAD_UDP_CHKSUM cpu_to_le32(0x00000004)
498#define TYPHOON_OFFLOAD_IP_CHKSUM __constant_cpu_to_le32(0x00000008) 498#define TYPHOON_OFFLOAD_IP_CHKSUM cpu_to_le32(0x00000008)
499#define TYPHOON_OFFLOAD_IPSEC __constant_cpu_to_le32(0x00000010) 499#define TYPHOON_OFFLOAD_IPSEC cpu_to_le32(0x00000010)
500#define TYPHOON_OFFLOAD_BCAST_THROTTLE __constant_cpu_to_le32(0x00000020) 500#define TYPHOON_OFFLOAD_BCAST_THROTTLE cpu_to_le32(0x00000020)
501#define TYPHOON_OFFLOAD_DHCP_PREVENT __constant_cpu_to_le32(0x00000040) 501#define TYPHOON_OFFLOAD_DHCP_PREVENT cpu_to_le32(0x00000040)
502#define TYPHOON_OFFLOAD_VLAN __constant_cpu_to_le32(0x00000080) 502#define TYPHOON_OFFLOAD_VLAN cpu_to_le32(0x00000080)
503#define TYPHOON_OFFLOAD_FILTERING __constant_cpu_to_le32(0x00000100) 503#define TYPHOON_OFFLOAD_FILTERING cpu_to_le32(0x00000100)
504#define TYPHOON_OFFLOAD_TCP_SEGMENT __constant_cpu_to_le32(0x00000200) 504#define TYPHOON_OFFLOAD_TCP_SEGMENT cpu_to_le32(0x00000200)
505 505
506/* TYPHOON_CMD_ENABLE_WAKE_EVENTS bits (cmd.parm1) 506/* TYPHOON_CMD_ENABLE_WAKE_EVENTS bits (cmd.parm1)
507 */ 507 */
508#define TYPHOON_WAKE_MAGIC_PKT __constant_cpu_to_le16(0x01) 508#define TYPHOON_WAKE_MAGIC_PKT cpu_to_le16(0x01)
509#define TYPHOON_WAKE_LINK_EVENT __constant_cpu_to_le16(0x02) 509#define TYPHOON_WAKE_LINK_EVENT cpu_to_le16(0x02)
510#define TYPHOON_WAKE_ICMP_ECHO __constant_cpu_to_le16(0x04) 510#define TYPHOON_WAKE_ICMP_ECHO cpu_to_le16(0x04)
511#define TYPHOON_WAKE_ARP __constant_cpu_to_le16(0x08) 511#define TYPHOON_WAKE_ARP cpu_to_le16(0x08)
512 512
513/* These are used to load the firmware image on the NIC 513/* These are used to load the firmware image on the NIC
514 */ 514 */
diff --git a/drivers/net/ucc_geth.c b/drivers/net/ucc_geth.c
index 11441225bf41..6def6f826a54 100644
--- a/drivers/net/ucc_geth.c
+++ b/drivers/net/ucc_geth.c
@@ -3251,7 +3251,7 @@ static int ucc_geth_poll(struct napi_struct *napi, int budget)
3251 howmany += ucc_geth_rx(ugeth, i, budget - howmany); 3251 howmany += ucc_geth_rx(ugeth, i, budget - howmany);
3252 3252
3253 if (howmany < budget) { 3253 if (howmany < budget) {
3254 netif_rx_complete(napi); 3254 napi_complete(napi);
3255 setbits32(ugeth->uccf->p_uccm, UCCE_RX_EVENTS); 3255 setbits32(ugeth->uccf->p_uccm, UCCE_RX_EVENTS);
3256 } 3256 }
3257 3257
@@ -3282,10 +3282,10 @@ static irqreturn_t ucc_geth_irq_handler(int irq, void *info)
3282 3282
3283 /* check for receive events that require processing */ 3283 /* check for receive events that require processing */
3284 if (ucce & UCCE_RX_EVENTS) { 3284 if (ucce & UCCE_RX_EVENTS) {
3285 if (netif_rx_schedule_prep(&ugeth->napi)) { 3285 if (napi_schedule_prep(&ugeth->napi)) {
3286 uccm &= ~UCCE_RX_EVENTS; 3286 uccm &= ~UCCE_RX_EVENTS;
3287 out_be32(uccf->p_uccm, uccm); 3287 out_be32(uccf->p_uccm, uccm);
3288 __netif_rx_schedule(&ugeth->napi); 3288 __napi_schedule(&ugeth->napi);
3289 } 3289 }
3290 } 3290 }
3291 3291
diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c
index 5574abe29c73..5b0b9647382c 100644
--- a/drivers/net/usb/smsc95xx.c
+++ b/drivers/net/usb/smsc95xx.c
@@ -55,7 +55,6 @@ struct smsc95xx_priv {
55 55
56struct usb_context { 56struct usb_context {
57 struct usb_ctrlrequest req; 57 struct usb_ctrlrequest req;
58 struct completion notify;
59 struct usbnet *dev; 58 struct usbnet *dev;
60}; 59};
61 60
@@ -307,7 +306,7 @@ static int smsc95xx_write_eeprom(struct usbnet *dev, u32 offset, u32 length,
307 return 0; 306 return 0;
308} 307}
309 308
310static void smsc95xx_async_cmd_callback(struct urb *urb, struct pt_regs *regs) 309static void smsc95xx_async_cmd_callback(struct urb *urb)
311{ 310{
312 struct usb_context *usb_context = urb->context; 311 struct usb_context *usb_context = urb->context;
313 struct usbnet *dev = usb_context->dev; 312 struct usbnet *dev = usb_context->dev;
@@ -316,8 +315,6 @@ static void smsc95xx_async_cmd_callback(struct urb *urb, struct pt_regs *regs)
316 if (status < 0) 315 if (status < 0)
317 devwarn(dev, "async callback failed with %d", status); 316 devwarn(dev, "async callback failed with %d", status);
318 317
319 complete(&usb_context->notify);
320
321 kfree(usb_context); 318 kfree(usb_context);
322 usb_free_urb(urb); 319 usb_free_urb(urb);
323} 320}
@@ -348,11 +345,10 @@ static int smsc95xx_write_reg_async(struct usbnet *dev, u16 index, u32 *data)
348 usb_context->req.wValue = 00; 345 usb_context->req.wValue = 00;
349 usb_context->req.wIndex = cpu_to_le16(index); 346 usb_context->req.wIndex = cpu_to_le16(index);
350 usb_context->req.wLength = cpu_to_le16(size); 347 usb_context->req.wLength = cpu_to_le16(size);
351 init_completion(&usb_context->notify);
352 348
353 usb_fill_control_urb(urb, dev->udev, usb_sndctrlpipe(dev->udev, 0), 349 usb_fill_control_urb(urb, dev->udev, usb_sndctrlpipe(dev->udev, 0),
354 (void *)&usb_context->req, data, size, 350 (void *)&usb_context->req, data, size,
355 (usb_complete_t)smsc95xx_async_cmd_callback, 351 smsc95xx_async_cmd_callback,
356 (void *)usb_context); 352 (void *)usb_context);
357 353
358 status = usb_submit_urb(urb, GFP_ATOMIC); 354 status = usb_submit_urb(urb, GFP_ATOMIC);
diff --git a/drivers/net/via-rhine.c b/drivers/net/via-rhine.c
index 3b8e63254277..4671436ecf0e 100644
--- a/drivers/net/via-rhine.c
+++ b/drivers/net/via-rhine.c
@@ -589,7 +589,7 @@ static int rhine_napipoll(struct napi_struct *napi, int budget)
589 work_done = rhine_rx(dev, budget); 589 work_done = rhine_rx(dev, budget);
590 590
591 if (work_done < budget) { 591 if (work_done < budget) {
592 netif_rx_complete(napi); 592 napi_complete(napi);
593 593
594 iowrite16(IntrRxDone | IntrRxErr | IntrRxEmpty| IntrRxOverflow | 594 iowrite16(IntrRxDone | IntrRxErr | IntrRxEmpty| IntrRxOverflow |
595 IntrRxDropped | IntrRxNoBuf | IntrTxAborted | 595 IntrRxDropped | IntrRxNoBuf | IntrTxAborted |
@@ -1319,7 +1319,7 @@ static irqreturn_t rhine_interrupt(int irq, void *dev_instance)
1319 IntrPCIErr | IntrStatsMax | IntrLinkChange, 1319 IntrPCIErr | IntrStatsMax | IntrLinkChange,
1320 ioaddr + IntrEnable); 1320 ioaddr + IntrEnable);
1321 1321
1322 netif_rx_schedule(&rp->napi); 1322 napi_schedule(&rp->napi);
1323 } 1323 }
1324 1324
1325 if (intr_status & (IntrTxErrSummary | IntrTxDone)) { 1325 if (intr_status & (IntrTxErrSummary | IntrTxDone)) {
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 63ef2a8905fb..3b6225a2c7d2 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -43,6 +43,7 @@ struct virtnet_info
43 struct virtqueue *rvq, *svq; 43 struct virtqueue *rvq, *svq;
44 struct net_device *dev; 44 struct net_device *dev;
45 struct napi_struct napi; 45 struct napi_struct napi;
46 unsigned int status;
46 47
47 /* The skb we couldn't send because buffers were full. */ 48 /* The skb we couldn't send because buffers were full. */
48 struct sk_buff *last_xmit_skb; 49 struct sk_buff *last_xmit_skb;
@@ -375,9 +376,9 @@ static void skb_recv_done(struct virtqueue *rvq)
375{ 376{
376 struct virtnet_info *vi = rvq->vdev->priv; 377 struct virtnet_info *vi = rvq->vdev->priv;
377 /* Schedule NAPI, Suppress further interrupts if successful. */ 378 /* Schedule NAPI, Suppress further interrupts if successful. */
378 if (netif_rx_schedule_prep(&vi->napi)) { 379 if (napi_schedule_prep(&vi->napi)) {
379 rvq->vq_ops->disable_cb(rvq); 380 rvq->vq_ops->disable_cb(rvq);
380 __netif_rx_schedule(&vi->napi); 381 __napi_schedule(&vi->napi);
381 } 382 }
382} 383}
383 384
@@ -403,11 +404,11 @@ again:
403 404
404 /* Out of packets? */ 405 /* Out of packets? */
405 if (received < budget) { 406 if (received < budget) {
406 netif_rx_complete(napi); 407 napi_complete(napi);
407 if (unlikely(!vi->rvq->vq_ops->enable_cb(vi->rvq)) 408 if (unlikely(!vi->rvq->vq_ops->enable_cb(vi->rvq))
408 && napi_schedule_prep(napi)) { 409 && napi_schedule_prep(napi)) {
409 vi->rvq->vq_ops->disable_cb(vi->rvq); 410 vi->rvq->vq_ops->disable_cb(vi->rvq);
410 __netif_rx_schedule(napi); 411 __napi_schedule(napi);
411 goto again; 412 goto again;
412 } 413 }
413 } 414 }
@@ -581,9 +582,9 @@ static int virtnet_open(struct net_device *dev)
581 * won't get another interrupt, so process any outstanding packets 582 * won't get another interrupt, so process any outstanding packets
582 * now. virtnet_poll wants re-enable the queue, so we disable here. 583 * now. virtnet_poll wants re-enable the queue, so we disable here.
583 * We synchronize against interrupts via NAPI_STATE_SCHED */ 584 * We synchronize against interrupts via NAPI_STATE_SCHED */
584 if (netif_rx_schedule_prep(&vi->napi)) { 585 if (napi_schedule_prep(&vi->napi)) {
585 vi->rvq->vq_ops->disable_cb(vi->rvq); 586 vi->rvq->vq_ops->disable_cb(vi->rvq);
586 __netif_rx_schedule(&vi->napi); 587 __napi_schedule(&vi->napi);
587 } 588 }
588 return 0; 589 return 0;
589} 590}
@@ -612,6 +613,7 @@ static struct ethtool_ops virtnet_ethtool_ops = {
612 .set_tx_csum = virtnet_set_tx_csum, 613 .set_tx_csum = virtnet_set_tx_csum,
613 .set_sg = ethtool_op_set_sg, 614 .set_sg = ethtool_op_set_sg,
614 .set_tso = ethtool_op_set_tso, 615 .set_tso = ethtool_op_set_tso,
616 .get_link = ethtool_op_get_link,
615}; 617};
616 618
617#define MIN_MTU 68 619#define MIN_MTU 68
@@ -637,6 +639,41 @@ static const struct net_device_ops virtnet_netdev = {
637#endif 639#endif
638}; 640};
639 641
642static void virtnet_update_status(struct virtnet_info *vi)
643{
644 u16 v;
645
646 if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_STATUS))
647 return;
648
649 vi->vdev->config->get(vi->vdev,
650 offsetof(struct virtio_net_config, status),
651 &v, sizeof(v));
652
653 /* Ignore unknown (future) status bits */
654 v &= VIRTIO_NET_S_LINK_UP;
655
656 if (vi->status == v)
657 return;
658
659 vi->status = v;
660
661 if (vi->status & VIRTIO_NET_S_LINK_UP) {
662 netif_carrier_on(vi->dev);
663 netif_wake_queue(vi->dev);
664 } else {
665 netif_carrier_off(vi->dev);
666 netif_stop_queue(vi->dev);
667 }
668}
669
670static void virtnet_config_changed(struct virtio_device *vdev)
671{
672 struct virtnet_info *vi = vdev->priv;
673
674 virtnet_update_status(vi);
675}
676
640static int virtnet_probe(struct virtio_device *vdev) 677static int virtnet_probe(struct virtio_device *vdev)
641{ 678{
642 int err; 679 int err;
@@ -739,6 +776,9 @@ static int virtnet_probe(struct virtio_device *vdev)
739 goto unregister; 776 goto unregister;
740 } 777 }
741 778
779 vi->status = VIRTIO_NET_S_LINK_UP;
780 virtnet_update_status(vi);
781
742 pr_debug("virtnet: registered device %s\n", dev->name); 782 pr_debug("virtnet: registered device %s\n", dev->name);
743 return 0; 783 return 0;
744 784
@@ -794,7 +834,7 @@ static unsigned int features[] = {
794 VIRTIO_NET_F_HOST_TSO4, VIRTIO_NET_F_HOST_UFO, VIRTIO_NET_F_HOST_TSO6, 834 VIRTIO_NET_F_HOST_TSO4, VIRTIO_NET_F_HOST_UFO, VIRTIO_NET_F_HOST_TSO6,
795 VIRTIO_NET_F_HOST_ECN, VIRTIO_NET_F_GUEST_TSO4, VIRTIO_NET_F_GUEST_TSO6, 835 VIRTIO_NET_F_HOST_ECN, VIRTIO_NET_F_GUEST_TSO4, VIRTIO_NET_F_GUEST_TSO6,
796 VIRTIO_NET_F_GUEST_ECN, /* We don't yet handle UFO input. */ 836 VIRTIO_NET_F_GUEST_ECN, /* We don't yet handle UFO input. */
797 VIRTIO_NET_F_MRG_RXBUF, 837 VIRTIO_NET_F_MRG_RXBUF, VIRTIO_NET_F_STATUS,
798 VIRTIO_F_NOTIFY_ON_EMPTY, 838 VIRTIO_F_NOTIFY_ON_EMPTY,
799}; 839};
800 840
@@ -806,6 +846,7 @@ static struct virtio_driver virtio_net = {
806 .id_table = id_table, 846 .id_table = id_table,
807 .probe = virtnet_probe, 847 .probe = virtnet_probe,
808 .remove = __devexit_p(virtnet_remove), 848 .remove = __devexit_p(virtnet_remove),
849 .config_changed = virtnet_config_changed,
809}; 850};
810 851
811static int __init init(void) 852static int __init init(void)
diff --git a/drivers/net/wan/c101.c b/drivers/net/wan/c101.c
index b46897996f7e..9693b0fd323d 100644
--- a/drivers/net/wan/c101.c
+++ b/drivers/net/wan/c101.c
@@ -296,7 +296,13 @@ static void c101_destroy_card(card_t *card)
296 kfree(card); 296 kfree(card);
297} 297}
298 298
299 299static const struct net_device_ops c101_ops = {
300 .ndo_open = c101_open,
301 .ndo_stop = c101_close,
302 .ndo_change_mtu = hdlc_change_mtu,
303 .ndo_start_xmit = hdlc_start_xmit,
304 .ndo_do_ioctl = c101_ioctl,
305};
300 306
301static int __init c101_run(unsigned long irq, unsigned long winbase) 307static int __init c101_run(unsigned long irq, unsigned long winbase)
302{ 308{
@@ -367,9 +373,7 @@ static int __init c101_run(unsigned long irq, unsigned long winbase)
367 dev->mem_start = winbase; 373 dev->mem_start = winbase;
368 dev->mem_end = winbase + C101_MAPPED_RAM_SIZE - 1; 374 dev->mem_end = winbase + C101_MAPPED_RAM_SIZE - 1;
369 dev->tx_queue_len = 50; 375 dev->tx_queue_len = 50;
370 dev->do_ioctl = c101_ioctl; 376 dev->netdev_ops = &c101_ops;
371 dev->open = c101_open;
372 dev->stop = c101_close;
373 hdlc->attach = sca_attach; 377 hdlc->attach = sca_attach;
374 hdlc->xmit = sca_xmit; 378 hdlc->xmit = sca_xmit;
375 card->settings.clock_type = CLOCK_EXT; 379 card->settings.clock_type = CLOCK_EXT;
diff --git a/drivers/net/wan/cosa.c b/drivers/net/wan/cosa.c
index d80b72e22dea..0d7ba117ef60 100644
--- a/drivers/net/wan/cosa.c
+++ b/drivers/net/wan/cosa.c
@@ -427,6 +427,15 @@ static void __exit cosa_exit(void)
427} 427}
428module_exit(cosa_exit); 428module_exit(cosa_exit);
429 429
430static const struct net_device_ops cosa_ops = {
431 .ndo_open = cosa_net_open,
432 .ndo_stop = cosa_net_close,
433 .ndo_change_mtu = hdlc_change_mtu,
434 .ndo_start_xmit = hdlc_start_xmit,
435 .ndo_do_ioctl = cosa_net_ioctl,
436 .ndo_tx_timeout = cosa_net_timeout,
437};
438
430static int cosa_probe(int base, int irq, int dma) 439static int cosa_probe(int base, int irq, int dma)
431{ 440{
432 struct cosa_data *cosa = cosa_cards+nr_cards; 441 struct cosa_data *cosa = cosa_cards+nr_cards;
@@ -575,10 +584,7 @@ static int cosa_probe(int base, int irq, int dma)
575 } 584 }
576 dev_to_hdlc(chan->netdev)->attach = cosa_net_attach; 585 dev_to_hdlc(chan->netdev)->attach = cosa_net_attach;
577 dev_to_hdlc(chan->netdev)->xmit = cosa_net_tx; 586 dev_to_hdlc(chan->netdev)->xmit = cosa_net_tx;
578 chan->netdev->open = cosa_net_open; 587 chan->netdev->netdev_ops = &cosa_ops;
579 chan->netdev->stop = cosa_net_close;
580 chan->netdev->do_ioctl = cosa_net_ioctl;
581 chan->netdev->tx_timeout = cosa_net_timeout;
582 chan->netdev->watchdog_timeo = TX_TIMEOUT; 588 chan->netdev->watchdog_timeo = TX_TIMEOUT;
583 chan->netdev->base_addr = chan->cosa->datareg; 589 chan->netdev->base_addr = chan->cosa->datareg;
584 chan->netdev->irq = chan->cosa->irq; 590 chan->netdev->irq = chan->cosa->irq;
diff --git a/drivers/net/wan/dscc4.c b/drivers/net/wan/dscc4.c
index 888025db2f02..8face5db8f32 100644
--- a/drivers/net/wan/dscc4.c
+++ b/drivers/net/wan/dscc4.c
@@ -883,6 +883,15 @@ static inline int dscc4_set_quartz(struct dscc4_dev_priv *dpriv, int hz)
883 return ret; 883 return ret;
884} 884}
885 885
886static const struct net_device_ops dscc4_ops = {
887 .ndo_open = dscc4_open,
888 .ndo_stop = dscc4_close,
889 .ndo_change_mtu = hdlc_change_mtu,
890 .ndo_start_xmit = hdlc_start_xmit,
891 .ndo_do_ioctl = dscc4_ioctl,
892 .ndo_tx_timeout = dscc4_tx_timeout,
893};
894
886static int dscc4_found1(struct pci_dev *pdev, void __iomem *ioaddr) 895static int dscc4_found1(struct pci_dev *pdev, void __iomem *ioaddr)
887{ 896{
888 struct dscc4_pci_priv *ppriv; 897 struct dscc4_pci_priv *ppriv;
@@ -916,13 +925,8 @@ static int dscc4_found1(struct pci_dev *pdev, void __iomem *ioaddr)
916 hdlc_device *hdlc = dev_to_hdlc(d); 925 hdlc_device *hdlc = dev_to_hdlc(d);
917 926
918 d->base_addr = (unsigned long)ioaddr; 927 d->base_addr = (unsigned long)ioaddr;
919 d->init = NULL;
920 d->irq = pdev->irq; 928 d->irq = pdev->irq;
921 d->open = dscc4_open; 929 d->netdev_ops = &dscc4_ops;
922 d->stop = dscc4_close;
923 d->set_multicast_list = NULL;
924 d->do_ioctl = dscc4_ioctl;
925 d->tx_timeout = dscc4_tx_timeout;
926 d->watchdog_timeo = TX_TIMEOUT; 930 d->watchdog_timeo = TX_TIMEOUT;
927 SET_NETDEV_DEV(d, &pdev->dev); 931 SET_NETDEV_DEV(d, &pdev->dev);
928 932
@@ -1048,7 +1052,7 @@ static int dscc4_open(struct net_device *dev)
1048 struct dscc4_pci_priv *ppriv; 1052 struct dscc4_pci_priv *ppriv;
1049 int ret = -EAGAIN; 1053 int ret = -EAGAIN;
1050 1054
1051 if ((dscc4_loopback_check(dpriv) < 0) || !dev->hard_start_xmit) 1055 if ((dscc4_loopback_check(dpriv) < 0))
1052 goto err; 1056 goto err;
1053 1057
1054 if ((ret = hdlc_open(dev))) 1058 if ((ret = hdlc_open(dev)))
diff --git a/drivers/net/wan/farsync.c b/drivers/net/wan/farsync.c
index 48a2c9d28950..00945f7c1e9b 100644
--- a/drivers/net/wan/farsync.c
+++ b/drivers/net/wan/farsync.c
@@ -2424,6 +2424,15 @@ fst_init_card(struct fst_card_info *card)
2424 type_strings[card->type], card->irq, card->nports); 2424 type_strings[card->type], card->irq, card->nports);
2425} 2425}
2426 2426
2427static const struct net_device_ops fst_ops = {
2428 .ndo_open = fst_open,
2429 .ndo_stop = fst_close,
2430 .ndo_change_mtu = hdlc_change_mtu,
2431 .ndo_start_xmit = hdlc_start_xmit,
2432 .ndo_do_ioctl = fst_ioctl,
2433 .ndo_tx_timeout = fst_tx_timeout,
2434};
2435
2427/* 2436/*
2428 * Initialise card when detected. 2437 * Initialise card when detected.
2429 * Returns 0 to indicate success, or errno otherwise. 2438 * Returns 0 to indicate success, or errno otherwise.
@@ -2565,12 +2574,9 @@ fst_add_one(struct pci_dev *pdev, const struct pci_device_id *ent)
2565 dev->base_addr = card->pci_conf; 2574 dev->base_addr = card->pci_conf;
2566 dev->irq = card->irq; 2575 dev->irq = card->irq;
2567 2576
2568 dev->tx_queue_len = FST_TX_QUEUE_LEN; 2577 dev->netdev_ops = &fst_ops;
2569 dev->open = fst_open; 2578 dev->tx_queue_len = FST_TX_QUEUE_LEN;
2570 dev->stop = fst_close; 2579 dev->watchdog_timeo = FST_TX_TIMEOUT;
2571 dev->do_ioctl = fst_ioctl;
2572 dev->watchdog_timeo = FST_TX_TIMEOUT;
2573 dev->tx_timeout = fst_tx_timeout;
2574 hdlc->attach = fst_attach; 2580 hdlc->attach = fst_attach;
2575 hdlc->xmit = fst_start_xmit; 2581 hdlc->xmit = fst_start_xmit;
2576 } 2582 }
diff --git a/drivers/net/wan/hd64572.c b/drivers/net/wan/hd64572.c
index 08b3536944fe..497b003d7239 100644
--- a/drivers/net/wan/hd64572.c
+++ b/drivers/net/wan/hd64572.c
@@ -341,7 +341,7 @@ static int sca_poll(struct napi_struct *napi, int budget)
341 received = sca_rx_done(port, budget); 341 received = sca_rx_done(port, budget);
342 342
343 if (received < budget) { 343 if (received < budget) {
344 netif_rx_complete(napi); 344 napi_complete(napi);
345 enable_intr(port); 345 enable_intr(port);
346 } 346 }
347 347
@@ -359,7 +359,7 @@ static irqreturn_t sca_intr(int irq, void *dev_id)
359 if (port && (isr0 & (i ? 0x08002200 : 0x00080022))) { 359 if (port && (isr0 & (i ? 0x08002200 : 0x00080022))) {
360 handled = 1; 360 handled = 1;
361 disable_intr(port); 361 disable_intr(port);
362 netif_rx_schedule(&port->napi); 362 napi_schedule(&port->napi);
363 } 363 }
364 } 364 }
365 365
diff --git a/drivers/net/wan/hdlc.c b/drivers/net/wan/hdlc.c
index 1f2a140c9f7c..43da8bd72973 100644
--- a/drivers/net/wan/hdlc.c
+++ b/drivers/net/wan/hdlc.c
@@ -44,7 +44,7 @@ static const char* version = "HDLC support module revision 1.22";
44 44
45static struct hdlc_proto *first_proto; 45static struct hdlc_proto *first_proto;
46 46
47static int hdlc_change_mtu(struct net_device *dev, int new_mtu) 47int hdlc_change_mtu(struct net_device *dev, int new_mtu)
48{ 48{
49 if ((new_mtu < 68) || (new_mtu > HDLC_MAX_MTU)) 49 if ((new_mtu < 68) || (new_mtu > HDLC_MAX_MTU))
50 return -EINVAL; 50 return -EINVAL;
@@ -52,15 +52,6 @@ static int hdlc_change_mtu(struct net_device *dev, int new_mtu)
52 return 0; 52 return 0;
53} 53}
54 54
55
56
57static struct net_device_stats *hdlc_get_stats(struct net_device *dev)
58{
59 return &dev->stats;
60}
61
62
63
64static int hdlc_rcv(struct sk_buff *skb, struct net_device *dev, 55static int hdlc_rcv(struct sk_buff *skb, struct net_device *dev,
65 struct packet_type *p, struct net_device *orig_dev) 56 struct packet_type *p, struct net_device *orig_dev)
66{ 57{
@@ -75,7 +66,15 @@ static int hdlc_rcv(struct sk_buff *skb, struct net_device *dev,
75 return hdlc->proto->netif_rx(skb); 66 return hdlc->proto->netif_rx(skb);
76} 67}
77 68
69int hdlc_start_xmit(struct sk_buff *skb, struct net_device *dev)
70{
71 hdlc_device *hdlc = dev_to_hdlc(dev);
72
73 if (hdlc->proto->xmit)
74 return hdlc->proto->xmit(skb, dev);
78 75
76 return hdlc->xmit(skb, dev); /* call hardware driver directly */
77}
79 78
80static inline void hdlc_proto_start(struct net_device *dev) 79static inline void hdlc_proto_start(struct net_device *dev)
81{ 80{
@@ -102,11 +101,11 @@ static int hdlc_device_event(struct notifier_block *this, unsigned long event,
102 hdlc_device *hdlc; 101 hdlc_device *hdlc;
103 unsigned long flags; 102 unsigned long flags;
104 int on; 103 int on;
105 104
106 if (dev_net(dev) != &init_net) 105 if (dev_net(dev) != &init_net)
107 return NOTIFY_DONE; 106 return NOTIFY_DONE;
108 107
109 if (dev->get_stats != hdlc_get_stats) 108 if (!(dev->priv_flags & IFF_WAN_HDLC))
110 return NOTIFY_DONE; /* not an HDLC device */ 109 return NOTIFY_DONE; /* not an HDLC device */
111 110
112 if (event != NETDEV_CHANGE) 111 if (event != NETDEV_CHANGE)
@@ -233,15 +232,13 @@ static void hdlc_setup_dev(struct net_device *dev)
233 /* Re-init all variables changed by HDLC protocol drivers, 232 /* Re-init all variables changed by HDLC protocol drivers,
234 * including ether_setup() called from hdlc_raw_eth.c. 233 * including ether_setup() called from hdlc_raw_eth.c.
235 */ 234 */
236 dev->get_stats = hdlc_get_stats;
237 dev->flags = IFF_POINTOPOINT | IFF_NOARP; 235 dev->flags = IFF_POINTOPOINT | IFF_NOARP;
236 dev->priv_flags = IFF_WAN_HDLC;
238 dev->mtu = HDLC_MAX_MTU; 237 dev->mtu = HDLC_MAX_MTU;
239 dev->type = ARPHRD_RAWHDLC; 238 dev->type = ARPHRD_RAWHDLC;
240 dev->hard_header_len = 16; 239 dev->hard_header_len = 16;
241 dev->addr_len = 0; 240 dev->addr_len = 0;
242 dev->header_ops = &hdlc_null_ops; 241 dev->header_ops = &hdlc_null_ops;
243
244 dev->change_mtu = hdlc_change_mtu;
245} 242}
246 243
247static void hdlc_setup(struct net_device *dev) 244static void hdlc_setup(struct net_device *dev)
@@ -339,6 +336,8 @@ MODULE_AUTHOR("Krzysztof Halasa <khc@pm.waw.pl>");
339MODULE_DESCRIPTION("HDLC support module"); 336MODULE_DESCRIPTION("HDLC support module");
340MODULE_LICENSE("GPL v2"); 337MODULE_LICENSE("GPL v2");
341 338
339EXPORT_SYMBOL(hdlc_change_mtu);
340EXPORT_SYMBOL(hdlc_start_xmit);
342EXPORT_SYMBOL(hdlc_open); 341EXPORT_SYMBOL(hdlc_open);
343EXPORT_SYMBOL(hdlc_close); 342EXPORT_SYMBOL(hdlc_close);
344EXPORT_SYMBOL(hdlc_ioctl); 343EXPORT_SYMBOL(hdlc_ioctl);
diff --git a/drivers/net/wan/hdlc_cisco.c b/drivers/net/wan/hdlc_cisco.c
index 44e64b15dbd1..af3fd4fead8a 100644
--- a/drivers/net/wan/hdlc_cisco.c
+++ b/drivers/net/wan/hdlc_cisco.c
@@ -382,7 +382,6 @@ static int cisco_ioctl(struct net_device *dev, struct ifreq *ifr)
382 382
383 memcpy(&state(hdlc)->settings, &new_settings, size); 383 memcpy(&state(hdlc)->settings, &new_settings, size);
384 spin_lock_init(&state(hdlc)->lock); 384 spin_lock_init(&state(hdlc)->lock);
385 dev->hard_start_xmit = hdlc->xmit;
386 dev->header_ops = &cisco_header_ops; 385 dev->header_ops = &cisco_header_ops;
387 dev->type = ARPHRD_CISCO; 386 dev->type = ARPHRD_CISCO;
388 netif_dormant_on(dev); 387 netif_dormant_on(dev);
diff --git a/drivers/net/wan/hdlc_fr.c b/drivers/net/wan/hdlc_fr.c
index f1ddd7c3459c..70e57cebc955 100644
--- a/drivers/net/wan/hdlc_fr.c
+++ b/drivers/net/wan/hdlc_fr.c
@@ -444,18 +444,6 @@ static int pvc_xmit(struct sk_buff *skb, struct net_device *dev)
444 return 0; 444 return 0;
445} 445}
446 446
447
448
449static int pvc_change_mtu(struct net_device *dev, int new_mtu)
450{
451 if ((new_mtu < 68) || (new_mtu > HDLC_MAX_MTU))
452 return -EINVAL;
453 dev->mtu = new_mtu;
454 return 0;
455}
456
457
458
459static inline void fr_log_dlci_active(pvc_device *pvc) 447static inline void fr_log_dlci_active(pvc_device *pvc)
460{ 448{
461 printk(KERN_INFO "%s: DLCI %d [%s%s%s]%s %s\n", 449 printk(KERN_INFO "%s: DLCI %d [%s%s%s]%s %s\n",
@@ -1068,6 +1056,14 @@ static void pvc_setup(struct net_device *dev)
1068 dev->addr_len = 2; 1056 dev->addr_len = 2;
1069} 1057}
1070 1058
1059static const struct net_device_ops pvc_ops = {
1060 .ndo_open = pvc_open,
1061 .ndo_stop = pvc_close,
1062 .ndo_change_mtu = hdlc_change_mtu,
1063 .ndo_start_xmit = pvc_xmit,
1064 .ndo_do_ioctl = pvc_ioctl,
1065};
1066
1071static int fr_add_pvc(struct net_device *frad, unsigned int dlci, int type) 1067static int fr_add_pvc(struct net_device *frad, unsigned int dlci, int type)
1072{ 1068{
1073 hdlc_device *hdlc = dev_to_hdlc(frad); 1069 hdlc_device *hdlc = dev_to_hdlc(frad);
@@ -1104,11 +1100,7 @@ static int fr_add_pvc(struct net_device *frad, unsigned int dlci, int type)
1104 *(__be16*)dev->dev_addr = htons(dlci); 1100 *(__be16*)dev->dev_addr = htons(dlci);
1105 dlci_to_q922(dev->broadcast, dlci); 1101 dlci_to_q922(dev->broadcast, dlci);
1106 } 1102 }
1107 dev->hard_start_xmit = pvc_xmit; 1103 dev->netdev_ops = &pvc_ops;
1108 dev->open = pvc_open;
1109 dev->stop = pvc_close;
1110 dev->do_ioctl = pvc_ioctl;
1111 dev->change_mtu = pvc_change_mtu;
1112 dev->mtu = HDLC_MAX_MTU; 1104 dev->mtu = HDLC_MAX_MTU;
1113 dev->tx_queue_len = 0; 1105 dev->tx_queue_len = 0;
1114 dev->ml_priv = pvc; 1106 dev->ml_priv = pvc;
@@ -1260,8 +1252,6 @@ static int fr_ioctl(struct net_device *dev, struct ifreq *ifr)
1260 state(hdlc)->dce_pvc_count = 0; 1252 state(hdlc)->dce_pvc_count = 0;
1261 } 1253 }
1262 memcpy(&state(hdlc)->settings, &new_settings, size); 1254 memcpy(&state(hdlc)->settings, &new_settings, size);
1263
1264 dev->hard_start_xmit = hdlc->xmit;
1265 dev->type = ARPHRD_FRAD; 1255 dev->type = ARPHRD_FRAD;
1266 return 0; 1256 return 0;
1267 1257
diff --git a/drivers/net/wan/hdlc_ppp.c b/drivers/net/wan/hdlc_ppp.c
index 57fe714c1c7f..7b8a5eae201d 100644
--- a/drivers/net/wan/hdlc_ppp.c
+++ b/drivers/net/wan/hdlc_ppp.c
@@ -558,7 +558,6 @@ out:
558 return NET_RX_DROP; 558 return NET_RX_DROP;
559} 559}
560 560
561
562static void ppp_timer(unsigned long arg) 561static void ppp_timer(unsigned long arg)
563{ 562{
564 struct proto *proto = (struct proto *)arg; 563 struct proto *proto = (struct proto *)arg;
@@ -679,7 +678,6 @@ static int ppp_ioctl(struct net_device *dev, struct ifreq *ifr)
679 ppp->keepalive_interval = 10; 678 ppp->keepalive_interval = 10;
680 ppp->keepalive_timeout = 60; 679 ppp->keepalive_timeout = 60;
681 680
682 dev->hard_start_xmit = hdlc->xmit;
683 dev->hard_header_len = sizeof(struct hdlc_header); 681 dev->hard_header_len = sizeof(struct hdlc_header);
684 dev->header_ops = &ppp_header_ops; 682 dev->header_ops = &ppp_header_ops;
685 dev->type = ARPHRD_PPP; 683 dev->type = ARPHRD_PPP;
diff --git a/drivers/net/wan/hdlc_raw.c b/drivers/net/wan/hdlc_raw.c
index 8612311748f4..6e92c64ebd0f 100644
--- a/drivers/net/wan/hdlc_raw.c
+++ b/drivers/net/wan/hdlc_raw.c
@@ -30,8 +30,6 @@ static __be16 raw_type_trans(struct sk_buff *skb, struct net_device *dev)
30 return __constant_htons(ETH_P_IP); 30 return __constant_htons(ETH_P_IP);
31} 31}
32 32
33
34
35static struct hdlc_proto proto = { 33static struct hdlc_proto proto = {
36 .type_trans = raw_type_trans, 34 .type_trans = raw_type_trans,
37 .ioctl = raw_ioctl, 35 .ioctl = raw_ioctl,
@@ -86,7 +84,6 @@ static int raw_ioctl(struct net_device *dev, struct ifreq *ifr)
86 if (result) 84 if (result)
87 return result; 85 return result;
88 memcpy(hdlc->state, &new_settings, size); 86 memcpy(hdlc->state, &new_settings, size);
89 dev->hard_start_xmit = hdlc->xmit;
90 dev->type = ARPHRD_RAWHDLC; 87 dev->type = ARPHRD_RAWHDLC;
91 netif_dormant_off(dev); 88 netif_dormant_off(dev);
92 return 0; 89 return 0;
diff --git a/drivers/net/wan/hdlc_raw_eth.c b/drivers/net/wan/hdlc_raw_eth.c
index a13fc3207520..49e68f5ca5f2 100644
--- a/drivers/net/wan/hdlc_raw_eth.c
+++ b/drivers/net/wan/hdlc_raw_eth.c
@@ -45,6 +45,7 @@ static int eth_tx(struct sk_buff *skb, struct net_device *dev)
45 45
46static struct hdlc_proto proto = { 46static struct hdlc_proto proto = {
47 .type_trans = eth_type_trans, 47 .type_trans = eth_type_trans,
48 .xmit = eth_tx,
48 .ioctl = raw_eth_ioctl, 49 .ioctl = raw_eth_ioctl,
49 .module = THIS_MODULE, 50 .module = THIS_MODULE,
50}; 51};
@@ -56,9 +57,7 @@ static int raw_eth_ioctl(struct net_device *dev, struct ifreq *ifr)
56 const size_t size = sizeof(raw_hdlc_proto); 57 const size_t size = sizeof(raw_hdlc_proto);
57 raw_hdlc_proto new_settings; 58 raw_hdlc_proto new_settings;
58 hdlc_device *hdlc = dev_to_hdlc(dev); 59 hdlc_device *hdlc = dev_to_hdlc(dev);
59 int result; 60 int result, old_qlen;
60 int (*old_ch_mtu)(struct net_device *, int);
61 int old_qlen;
62 61
63 switch (ifr->ifr_settings.type) { 62 switch (ifr->ifr_settings.type) {
64 case IF_GET_PROTO: 63 case IF_GET_PROTO:
@@ -99,11 +98,8 @@ static int raw_eth_ioctl(struct net_device *dev, struct ifreq *ifr)
99 if (result) 98 if (result)
100 return result; 99 return result;
101 memcpy(hdlc->state, &new_settings, size); 100 memcpy(hdlc->state, &new_settings, size);
102 dev->hard_start_xmit = eth_tx;
103 old_ch_mtu = dev->change_mtu;
104 old_qlen = dev->tx_queue_len; 101 old_qlen = dev->tx_queue_len;
105 ether_setup(dev); 102 ether_setup(dev);
106 dev->change_mtu = old_ch_mtu;
107 dev->tx_queue_len = old_qlen; 103 dev->tx_queue_len = old_qlen;
108 random_ether_addr(dev->dev_addr); 104 random_ether_addr(dev->dev_addr);
109 netif_dormant_off(dev); 105 netif_dormant_off(dev);
diff --git a/drivers/net/wan/hdlc_x25.c b/drivers/net/wan/hdlc_x25.c
index cbcbf6f0414c..b1dc29ed1583 100644
--- a/drivers/net/wan/hdlc_x25.c
+++ b/drivers/net/wan/hdlc_x25.c
@@ -184,6 +184,7 @@ static struct hdlc_proto proto = {
184 .close = x25_close, 184 .close = x25_close,
185 .ioctl = x25_ioctl, 185 .ioctl = x25_ioctl,
186 .netif_rx = x25_rx, 186 .netif_rx = x25_rx,
187 .xmit = x25_xmit,
187 .module = THIS_MODULE, 188 .module = THIS_MODULE,
188}; 189};
189 190
@@ -213,7 +214,6 @@ static int x25_ioctl(struct net_device *dev, struct ifreq *ifr)
213 214
214 if ((result = attach_hdlc_protocol(dev, &proto, 0))) 215 if ((result = attach_hdlc_protocol(dev, &proto, 0)))
215 return result; 216 return result;
216 dev->hard_start_xmit = x25_xmit;
217 dev->type = ARPHRD_X25; 217 dev->type = ARPHRD_X25;
218 netif_dormant_off(dev); 218 netif_dormant_off(dev);
219 return 0; 219 return 0;
diff --git a/drivers/net/wan/hostess_sv11.c b/drivers/net/wan/hostess_sv11.c
index af54f0cf1b35..567d4f5062d6 100644
--- a/drivers/net/wan/hostess_sv11.c
+++ b/drivers/net/wan/hostess_sv11.c
@@ -173,6 +173,14 @@ static int hostess_attach(struct net_device *dev, unsigned short encoding,
173 * Description block for a Comtrol Hostess SV11 card 173 * Description block for a Comtrol Hostess SV11 card
174 */ 174 */
175 175
176static const struct net_device_ops hostess_ops = {
177 .ndo_open = hostess_open,
178 .ndo_stop = hostess_close,
179 .ndo_change_mtu = hdlc_change_mtu,
180 .ndo_start_xmit = hdlc_start_xmit,
181 .ndo_do_ioctl = hostess_ioctl,
182};
183
176static struct z8530_dev *sv11_init(int iobase, int irq) 184static struct z8530_dev *sv11_init(int iobase, int irq)
177{ 185{
178 struct z8530_dev *sv; 186 struct z8530_dev *sv;
@@ -267,9 +275,7 @@ static struct z8530_dev *sv11_init(int iobase, int irq)
267 275
268 dev_to_hdlc(netdev)->attach = hostess_attach; 276 dev_to_hdlc(netdev)->attach = hostess_attach;
269 dev_to_hdlc(netdev)->xmit = hostess_queue_xmit; 277 dev_to_hdlc(netdev)->xmit = hostess_queue_xmit;
270 netdev->open = hostess_open; 278 netdev->netdev_ops = &hostess_ops;
271 netdev->stop = hostess_close;
272 netdev->do_ioctl = hostess_ioctl;
273 netdev->base_addr = iobase; 279 netdev->base_addr = iobase;
274 netdev->irq = irq; 280 netdev->irq = irq;
275 281
diff --git a/drivers/net/wan/ixp4xx_hss.c b/drivers/net/wan/ixp4xx_hss.c
index 0dbd85b0162d..3bf7d3f447db 100644
--- a/drivers/net/wan/ixp4xx_hss.c
+++ b/drivers/net/wan/ixp4xx_hss.c
@@ -622,7 +622,7 @@ static void hss_hdlc_rx_irq(void *pdev)
622 printk(KERN_DEBUG "%s: hss_hdlc_rx_irq\n", dev->name); 622 printk(KERN_DEBUG "%s: hss_hdlc_rx_irq\n", dev->name);
623#endif 623#endif
624 qmgr_disable_irq(queue_ids[port->id].rx); 624 qmgr_disable_irq(queue_ids[port->id].rx);
625 netif_rx_schedule(&port->napi); 625 napi_schedule(&port->napi);
626} 626}
627 627
628static int hss_hdlc_poll(struct napi_struct *napi, int budget) 628static int hss_hdlc_poll(struct napi_struct *napi, int budget)
@@ -649,15 +649,15 @@ static int hss_hdlc_poll(struct napi_struct *napi, int budget)
649 if ((n = queue_get_desc(rxq, port, 0)) < 0) { 649 if ((n = queue_get_desc(rxq, port, 0)) < 0) {
650#if DEBUG_RX 650#if DEBUG_RX
651 printk(KERN_DEBUG "%s: hss_hdlc_poll" 651 printk(KERN_DEBUG "%s: hss_hdlc_poll"
652 " netif_rx_complete\n", dev->name); 652 " napi_complete\n", dev->name);
653#endif 653#endif
654 netif_rx_complete(napi); 654 napi_complete(napi);
655 qmgr_enable_irq(rxq); 655 qmgr_enable_irq(rxq);
656 if (!qmgr_stat_empty(rxq) && 656 if (!qmgr_stat_empty(rxq) &&
657 netif_rx_reschedule(napi)) { 657 napi_reschedule(napi)) {
658#if DEBUG_RX 658#if DEBUG_RX
659 printk(KERN_DEBUG "%s: hss_hdlc_poll" 659 printk(KERN_DEBUG "%s: hss_hdlc_poll"
660 " netif_rx_reschedule succeeded\n", 660 " napi_reschedule succeeded\n",
661 dev->name); 661 dev->name);
662#endif 662#endif
663 qmgr_disable_irq(rxq); 663 qmgr_disable_irq(rxq);
@@ -1069,7 +1069,7 @@ static int hss_hdlc_open(struct net_device *dev)
1069 hss_start_hdlc(port); 1069 hss_start_hdlc(port);
1070 1070
1071 /* we may already have RX data, enables IRQ */ 1071 /* we may already have RX data, enables IRQ */
1072 netif_rx_schedule(&port->napi); 1072 napi_schedule(&port->napi);
1073 return 0; 1073 return 0;
1074 1074
1075err_unlock: 1075err_unlock:
@@ -1230,6 +1230,14 @@ static int hss_hdlc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
1230 * initialization 1230 * initialization
1231 ****************************************************************************/ 1231 ****************************************************************************/
1232 1232
1233static const struct net_device_ops hss_hdlc_ops = {
1234 .ndo_open = hss_hdlc_open,
1235 .ndo_stop = hss_hdlc_close,
1236 .ndo_change_mtu = hdlc_change_mtu,
1237 .ndo_start_xmit = hdlc_start_xmit,
1238 .ndo_do_ioctl = hss_hdlc_ioctl,
1239};
1240
1233static int __devinit hss_init_one(struct platform_device *pdev) 1241static int __devinit hss_init_one(struct platform_device *pdev)
1234{ 1242{
1235 struct port *port; 1243 struct port *port;
@@ -1254,9 +1262,7 @@ static int __devinit hss_init_one(struct platform_device *pdev)
1254 hdlc = dev_to_hdlc(dev); 1262 hdlc = dev_to_hdlc(dev);
1255 hdlc->attach = hss_hdlc_attach; 1263 hdlc->attach = hss_hdlc_attach;
1256 hdlc->xmit = hss_hdlc_xmit; 1264 hdlc->xmit = hss_hdlc_xmit;
1257 dev->open = hss_hdlc_open; 1265 dev->netdev_ops = &hss_hdlc_ops;
1258 dev->stop = hss_hdlc_close;
1259 dev->do_ioctl = hss_hdlc_ioctl;
1260 dev->tx_queue_len = 100; 1266 dev->tx_queue_len = 100;
1261 port->clock_type = CLOCK_EXT; 1267 port->clock_type = CLOCK_EXT;
1262 port->clock_rate = 2048000; 1268 port->clock_rate = 2048000;
diff --git a/drivers/net/wan/lmc/lmc_main.c b/drivers/net/wan/lmc/lmc_main.c
index feac3b99f8fe..45b1822c962d 100644
--- a/drivers/net/wan/lmc/lmc_main.c
+++ b/drivers/net/wan/lmc/lmc_main.c
@@ -806,6 +806,16 @@ static int lmc_attach(struct net_device *dev, unsigned short encoding,
806 return -EINVAL; 806 return -EINVAL;
807} 807}
808 808
809static const struct net_device_ops lmc_ops = {
810 .ndo_open = lmc_open,
811 .ndo_stop = lmc_close,
812 .ndo_change_mtu = hdlc_change_mtu,
813 .ndo_start_xmit = hdlc_start_xmit,
814 .ndo_do_ioctl = lmc_ioctl,
815 .ndo_tx_timeout = lmc_driver_timeout,
816 .ndo_get_stats = lmc_get_stats,
817};
818
809static int __devinit lmc_init_one(struct pci_dev *pdev, 819static int __devinit lmc_init_one(struct pci_dev *pdev,
810 const struct pci_device_id *ent) 820 const struct pci_device_id *ent)
811{ 821{
@@ -849,11 +859,7 @@ static int __devinit lmc_init_one(struct pci_dev *pdev,
849 dev->type = ARPHRD_HDLC; 859 dev->type = ARPHRD_HDLC;
850 dev_to_hdlc(dev)->xmit = lmc_start_xmit; 860 dev_to_hdlc(dev)->xmit = lmc_start_xmit;
851 dev_to_hdlc(dev)->attach = lmc_attach; 861 dev_to_hdlc(dev)->attach = lmc_attach;
852 dev->open = lmc_open; 862 dev->netdev_ops = &lmc_ops;
853 dev->stop = lmc_close;
854 dev->get_stats = lmc_get_stats;
855 dev->do_ioctl = lmc_ioctl;
856 dev->tx_timeout = lmc_driver_timeout;
857 dev->watchdog_timeo = HZ; /* 1 second */ 863 dev->watchdog_timeo = HZ; /* 1 second */
858 dev->tx_queue_len = 100; 864 dev->tx_queue_len = 100;
859 sc->lmc_device = dev; 865 sc->lmc_device = dev;
@@ -1059,9 +1065,6 @@ static int lmc_open(struct net_device *dev)
1059 if ((err = lmc_proto_open(sc)) != 0) 1065 if ((err = lmc_proto_open(sc)) != 0)
1060 return err; 1066 return err;
1061 1067
1062 dev->do_ioctl = lmc_ioctl;
1063
1064
1065 netif_start_queue(dev); 1068 netif_start_queue(dev);
1066 sc->extra_stats.tx_tbusy0++; 1069 sc->extra_stats.tx_tbusy0++;
1067 1070
diff --git a/drivers/net/wan/lmc/lmc_proto.c b/drivers/net/wan/lmc/lmc_proto.c
index 94b4c208b013..044a48175c42 100644
--- a/drivers/net/wan/lmc/lmc_proto.c
+++ b/drivers/net/wan/lmc/lmc_proto.c
@@ -51,30 +51,15 @@
51void lmc_proto_attach(lmc_softc_t *sc) /*FOLD00*/ 51void lmc_proto_attach(lmc_softc_t *sc) /*FOLD00*/
52{ 52{
53 lmc_trace(sc->lmc_device, "lmc_proto_attach in"); 53 lmc_trace(sc->lmc_device, "lmc_proto_attach in");
54 switch(sc->if_type){ 54 if (sc->if_type == LMC_NET) {
55 case LMC_PPP:
56 {
57 struct net_device *dev = sc->lmc_device;
58 dev->do_ioctl = lmc_ioctl;
59 }
60 break;
61 case LMC_NET:
62 {
63 struct net_device *dev = sc->lmc_device; 55 struct net_device *dev = sc->lmc_device;
64 /* 56 /*
65 * They set a few basics because they don't use HDLC 57 * They set a few basics because they don't use HDLC
66 */ 58 */
67 dev->flags |= IFF_POINTOPOINT; 59 dev->flags |= IFF_POINTOPOINT;
68
69 dev->hard_header_len = 0; 60 dev->hard_header_len = 0;
70 dev->addr_len = 0; 61 dev->addr_len = 0;
71 } 62 }
72 case LMC_RAW: /* Setup the task queue, maybe we should notify someone? */
73 {
74 }
75 default:
76 break;
77 }
78 lmc_trace(sc->lmc_device, "lmc_proto_attach out"); 63 lmc_trace(sc->lmc_device, "lmc_proto_attach out");
79} 64}
80 65
diff --git a/drivers/net/wan/n2.c b/drivers/net/wan/n2.c
index 697715ae80f4..83da596e2052 100644
--- a/drivers/net/wan/n2.c
+++ b/drivers/net/wan/n2.c
@@ -324,7 +324,13 @@ static void n2_destroy_card(card_t *card)
324 kfree(card); 324 kfree(card);
325} 325}
326 326
327 327static const struct net_device_ops n2_ops = {
328 .ndo_open = n2_open,
329 .ndo_stop = n2_close,
330 .ndo_change_mtu = hdlc_change_mtu,
331 .ndo_start_xmit = hdlc_start_xmit,
332 .ndo_do_ioctl = n2_ioctl,
333};
328 334
329static int __init n2_run(unsigned long io, unsigned long irq, 335static int __init n2_run(unsigned long io, unsigned long irq,
330 unsigned long winbase, long valid0, long valid1) 336 unsigned long winbase, long valid0, long valid1)
@@ -460,9 +466,7 @@ static int __init n2_run(unsigned long io, unsigned long irq,
460 dev->mem_start = winbase; 466 dev->mem_start = winbase;
461 dev->mem_end = winbase + USE_WINDOWSIZE - 1; 467 dev->mem_end = winbase + USE_WINDOWSIZE - 1;
462 dev->tx_queue_len = 50; 468 dev->tx_queue_len = 50;
463 dev->do_ioctl = n2_ioctl; 469 dev->netdev_ops = &n2_ops;
464 dev->open = n2_open;
465 dev->stop = n2_close;
466 hdlc->attach = sca_attach; 470 hdlc->attach = sca_attach;
467 hdlc->xmit = sca_xmit; 471 hdlc->xmit = sca_xmit;
468 port->settings.clock_type = CLOCK_EXT; 472 port->settings.clock_type = CLOCK_EXT;
diff --git a/drivers/net/wan/pc300too.c b/drivers/net/wan/pc300too.c
index f247e5d9002a..60ece54bdd94 100644
--- a/drivers/net/wan/pc300too.c
+++ b/drivers/net/wan/pc300too.c
@@ -287,7 +287,13 @@ static void pc300_pci_remove_one(struct pci_dev *pdev)
287 kfree(card); 287 kfree(card);
288} 288}
289 289
290 290static const struct net_device_ops pc300_ops = {
291 .ndo_open = pc300_open,
292 .ndo_stop = pc300_close,
293 .ndo_change_mtu = hdlc_change_mtu,
294 .ndo_start_xmit = hdlc_start_xmit,
295 .ndo_do_ioctl = pc300_ioctl,
296};
291 297
292static int __devinit pc300_pci_init_one(struct pci_dev *pdev, 298static int __devinit pc300_pci_init_one(struct pci_dev *pdev,
293 const struct pci_device_id *ent) 299 const struct pci_device_id *ent)
@@ -448,9 +454,7 @@ static int __devinit pc300_pci_init_one(struct pci_dev *pdev,
448 dev->mem_start = ramphys; 454 dev->mem_start = ramphys;
449 dev->mem_end = ramphys + ramsize - 1; 455 dev->mem_end = ramphys + ramsize - 1;
450 dev->tx_queue_len = 50; 456 dev->tx_queue_len = 50;
451 dev->do_ioctl = pc300_ioctl; 457 dev->netdev_ops = &pc300_ops;
452 dev->open = pc300_open;
453 dev->stop = pc300_close;
454 hdlc->attach = sca_attach; 458 hdlc->attach = sca_attach;
455 hdlc->xmit = sca_xmit; 459 hdlc->xmit = sca_xmit;
456 port->settings.clock_type = CLOCK_EXT; 460 port->settings.clock_type = CLOCK_EXT;
diff --git a/drivers/net/wan/pci200syn.c b/drivers/net/wan/pci200syn.c
index 1104d3a692f7..e035d8c57e11 100644
--- a/drivers/net/wan/pci200syn.c
+++ b/drivers/net/wan/pci200syn.c
@@ -265,7 +265,13 @@ static void pci200_pci_remove_one(struct pci_dev *pdev)
265 kfree(card); 265 kfree(card);
266} 266}
267 267
268 268static const struct net_device_ops pci200_ops = {
269 .ndo_open = pci200_open,
270 .ndo_stop = pci200_close,
271 .ndo_change_mtu = hdlc_change_mtu,
272 .ndo_start_xmit = hdlc_start_xmit,
273 .ndo_do_ioctl = pci200_ioctl,
274};
269 275
270static int __devinit pci200_pci_init_one(struct pci_dev *pdev, 276static int __devinit pci200_pci_init_one(struct pci_dev *pdev,
271 const struct pci_device_id *ent) 277 const struct pci_device_id *ent)
@@ -395,9 +401,7 @@ static int __devinit pci200_pci_init_one(struct pci_dev *pdev,
395 dev->mem_start = ramphys; 401 dev->mem_start = ramphys;
396 dev->mem_end = ramphys + ramsize - 1; 402 dev->mem_end = ramphys + ramsize - 1;
397 dev->tx_queue_len = 50; 403 dev->tx_queue_len = 50;
398 dev->do_ioctl = pci200_ioctl; 404 dev->netdev_ops = &pci200_ops;
399 dev->open = pci200_open;
400 dev->stop = pci200_close;
401 hdlc->attach = sca_attach; 405 hdlc->attach = sca_attach;
402 hdlc->xmit = sca_xmit; 406 hdlc->xmit = sca_xmit;
403 port->settings.clock_type = CLOCK_EXT; 407 port->settings.clock_type = CLOCK_EXT;
diff --git a/drivers/net/wan/sealevel.c b/drivers/net/wan/sealevel.c
index 0941a26f6e3f..23b269027453 100644
--- a/drivers/net/wan/sealevel.c
+++ b/drivers/net/wan/sealevel.c
@@ -169,6 +169,14 @@ static int sealevel_attach(struct net_device *dev, unsigned short encoding,
169 return -EINVAL; 169 return -EINVAL;
170} 170}
171 171
172static const struct net_device_ops sealevel_ops = {
173 .ndo_open = sealevel_open,
174 .ndo_stop = sealevel_close,
175 .ndo_change_mtu = hdlc_change_mtu,
176 .ndo_start_xmit = hdlc_start_xmit,
177 .ndo_do_ioctl = sealevel_ioctl,
178};
179
172static int slvl_setup(struct slvl_device *sv, int iobase, int irq) 180static int slvl_setup(struct slvl_device *sv, int iobase, int irq)
173{ 181{
174 struct net_device *dev = alloc_hdlcdev(sv); 182 struct net_device *dev = alloc_hdlcdev(sv);
@@ -177,9 +185,7 @@ static int slvl_setup(struct slvl_device *sv, int iobase, int irq)
177 185
178 dev_to_hdlc(dev)->attach = sealevel_attach; 186 dev_to_hdlc(dev)->attach = sealevel_attach;
179 dev_to_hdlc(dev)->xmit = sealevel_queue_xmit; 187 dev_to_hdlc(dev)->xmit = sealevel_queue_xmit;
180 dev->open = sealevel_open; 188 dev->netdev_ops = &sealevel_ops;
181 dev->stop = sealevel_close;
182 dev->do_ioctl = sealevel_ioctl;
183 dev->base_addr = iobase; 189 dev->base_addr = iobase;
184 dev->irq = irq; 190 dev->irq = irq;
185 191
diff --git a/drivers/net/wan/wanxl.c b/drivers/net/wan/wanxl.c
index 4bffb67ebcae..887acb0dc807 100644
--- a/drivers/net/wan/wanxl.c
+++ b/drivers/net/wan/wanxl.c
@@ -547,6 +547,15 @@ static void wanxl_pci_remove_one(struct pci_dev *pdev)
547 547
548#include "wanxlfw.inc" 548#include "wanxlfw.inc"
549 549
550static const struct net_device_ops wanxl_ops = {
551 .ndo_open = wanxl_open,
552 .ndo_stop = wanxl_close,
553 .ndo_change_mtu = hdlc_change_mtu,
554 .ndo_start_xmit = hdlc_start_xmit,
555 .ndo_do_ioctl = wanxl_ioctl,
556 .ndo_get_stats = wanxl_get_stats,
557};
558
550static int __devinit wanxl_pci_init_one(struct pci_dev *pdev, 559static int __devinit wanxl_pci_init_one(struct pci_dev *pdev,
551 const struct pci_device_id *ent) 560 const struct pci_device_id *ent)
552{ 561{
@@ -777,12 +786,9 @@ static int __devinit wanxl_pci_init_one(struct pci_dev *pdev,
777 hdlc = dev_to_hdlc(dev); 786 hdlc = dev_to_hdlc(dev);
778 spin_lock_init(&port->lock); 787 spin_lock_init(&port->lock);
779 dev->tx_queue_len = 50; 788 dev->tx_queue_len = 50;
780 dev->do_ioctl = wanxl_ioctl; 789 dev->netdev_ops = &wanxl_ops;
781 dev->open = wanxl_open;
782 dev->stop = wanxl_close;
783 hdlc->attach = wanxl_attach; 790 hdlc->attach = wanxl_attach;
784 hdlc->xmit = wanxl_xmit; 791 hdlc->xmit = wanxl_xmit;
785 dev->get_stats = wanxl_get_stats;
786 port->card = card; 792 port->card = card;
787 port->node = i; 793 port->node = i;
788 get_status(port)->clocking = CLOCK_EXT; 794 get_status(port)->clocking = CLOCK_EXT;
diff --git a/drivers/net/wimax/i2400m/netdev.c b/drivers/net/wimax/i2400m/netdev.c
index 63fe708e8a31..57159e4bbfe1 100644
--- a/drivers/net/wimax/i2400m/netdev.c
+++ b/drivers/net/wimax/i2400m/netdev.c
@@ -493,6 +493,14 @@ error_skb_realloc:
493 i2400m, buf, buf_len); 493 i2400m, buf, buf_len);
494} 494}
495 495
496static const struct net_device_ops i2400m_netdev_ops = {
497 .ndo_open = i2400m_open,
498 .ndo_stop = i2400m_stop,
499 .ndo_start_xmit = i2400m_hard_start_xmit,
500 .ndo_tx_timeout = i2400m_tx_timeout,
501 .ndo_change_mtu = i2400m_change_mtu,
502};
503
496 504
497/** 505/**
498 * i2400m_netdev_setup - Setup setup @net_dev's i2400m private data 506 * i2400m_netdev_setup - Setup setup @net_dev's i2400m private data
@@ -513,11 +521,7 @@ void i2400m_netdev_setup(struct net_device *net_dev)
513 & (~IFF_BROADCAST /* i2400m is P2P */ 521 & (~IFF_BROADCAST /* i2400m is P2P */
514 & ~IFF_MULTICAST); 522 & ~IFF_MULTICAST);
515 net_dev->watchdog_timeo = I2400M_TX_TIMEOUT; 523 net_dev->watchdog_timeo = I2400M_TX_TIMEOUT;
516 net_dev->open = i2400m_open; 524 net_dev->netdev_ops = &i2400m_netdev_ops;
517 net_dev->stop = i2400m_stop;
518 net_dev->hard_start_xmit = i2400m_hard_start_xmit;
519 net_dev->change_mtu = i2400m_change_mtu;
520 net_dev->tx_timeout = i2400m_tx_timeout;
521 d_fnend(3, NULL, "(net_dev %p) = void\n", net_dev); 525 d_fnend(3, NULL, "(net_dev %p) = void\n", net_dev);
522} 526}
523EXPORT_SYMBOL_GPL(i2400m_netdev_setup); 527EXPORT_SYMBOL_GPL(i2400m_netdev_setup);
diff --git a/drivers/net/wireless/ath5k/debug.c b/drivers/net/wireless/ath5k/debug.c
index ccaeb5c219d2..d281b6e38629 100644
--- a/drivers/net/wireless/ath5k/debug.c
+++ b/drivers/net/wireless/ath5k/debug.c
@@ -165,7 +165,7 @@ static int reg_show(struct seq_file *seq, void *p)
165 return 0; 165 return 0;
166} 166}
167 167
168static struct seq_operations register_seq_ops = { 168static const struct seq_operations register_seq_ops = {
169 .start = reg_start, 169 .start = reg_start,
170 .next = reg_next, 170 .next = reg_next,
171 .stop = reg_stop, 171 .stop = reg_stop,
diff --git a/drivers/net/wireless/libertas/debugfs.c b/drivers/net/wireless/libertas/debugfs.c
index ec4efd7ff3c8..50e28a0cdfee 100644
--- a/drivers/net/wireless/libertas/debugfs.c
+++ b/drivers/net/wireless/libertas/debugfs.c
@@ -629,7 +629,7 @@ static ssize_t lbs_rdrf_write(struct file *file,
629 res = -EFAULT; 629 res = -EFAULT;
630 goto out_unlock; 630 goto out_unlock;
631 } 631 }
632 priv->rf_offset = simple_strtoul((char *)buf, NULL, 16); 632 priv->rf_offset = simple_strtoul(buf, NULL, 16);
633 res = count; 633 res = count;
634out_unlock: 634out_unlock:
635 free_page(addr); 635 free_page(addr);
@@ -680,12 +680,12 @@ out_unlock:
680} 680}
681 681
682struct lbs_debugfs_files { 682struct lbs_debugfs_files {
683 char *name; 683 const char *name;
684 int perm; 684 int perm;
685 struct file_operations fops; 685 struct file_operations fops;
686}; 686};
687 687
688static struct lbs_debugfs_files debugfs_files[] = { 688static const struct lbs_debugfs_files debugfs_files[] = {
689 { "info", 0444, FOPS(lbs_dev_info, write_file_dummy), }, 689 { "info", 0444, FOPS(lbs_dev_info, write_file_dummy), },
690 { "getscantable", 0444, FOPS(lbs_getscantable, 690 { "getscantable", 0444, FOPS(lbs_getscantable,
691 write_file_dummy), }, 691 write_file_dummy), },
@@ -693,7 +693,7 @@ static struct lbs_debugfs_files debugfs_files[] = {
693 lbs_sleepparams_write), }, 693 lbs_sleepparams_write), },
694}; 694};
695 695
696static struct lbs_debugfs_files debugfs_events_files[] = { 696static const struct lbs_debugfs_files debugfs_events_files[] = {
697 {"low_rssi", 0644, FOPS(lbs_lowrssi_read, 697 {"low_rssi", 0644, FOPS(lbs_lowrssi_read,
698 lbs_lowrssi_write), }, 698 lbs_lowrssi_write), },
699 {"low_snr", 0644, FOPS(lbs_lowsnr_read, 699 {"low_snr", 0644, FOPS(lbs_lowsnr_read,
@@ -708,7 +708,7 @@ static struct lbs_debugfs_files debugfs_events_files[] = {
708 lbs_highsnr_write), }, 708 lbs_highsnr_write), },
709}; 709};
710 710
711static struct lbs_debugfs_files debugfs_regs_files[] = { 711static const struct lbs_debugfs_files debugfs_regs_files[] = {
712 {"rdmac", 0644, FOPS(lbs_rdmac_read, lbs_rdmac_write), }, 712 {"rdmac", 0644, FOPS(lbs_rdmac_read, lbs_rdmac_write), },
713 {"wrmac", 0600, FOPS(NULL, lbs_wrmac_write), }, 713 {"wrmac", 0600, FOPS(NULL, lbs_wrmac_write), },
714 {"rdbbp", 0644, FOPS(lbs_rdbbp_read, lbs_rdbbp_write), }, 714 {"rdbbp", 0644, FOPS(lbs_rdbbp_read, lbs_rdbbp_write), },
@@ -735,7 +735,7 @@ void lbs_debugfs_remove(void)
735void lbs_debugfs_init_one(struct lbs_private *priv, struct net_device *dev) 735void lbs_debugfs_init_one(struct lbs_private *priv, struct net_device *dev)
736{ 736{
737 int i; 737 int i;
738 struct lbs_debugfs_files *files; 738 const struct lbs_debugfs_files *files;
739 if (!lbs_dir) 739 if (!lbs_dir)
740 goto exit; 740 goto exit;
741 741
@@ -938,7 +938,7 @@ static ssize_t lbs_debugfs_write(struct file *f, const char __user *buf,
938 return (ssize_t)cnt; 938 return (ssize_t)cnt;
939} 939}
940 940
941static struct file_operations lbs_debug_fops = { 941static const struct file_operations lbs_debug_fops = {
942 .owner = THIS_MODULE, 942 .owner = THIS_MODULE,
943 .open = open_file_generic, 943 .open = open_file_generic,
944 .write = lbs_debugfs_write, 944 .write = lbs_debugfs_write,
diff --git a/drivers/net/wireless/strip.c b/drivers/net/wireless/strip.c
index 7015f2480550..d6bf8d2ef8ea 100644
--- a/drivers/net/wireless/strip.c
+++ b/drivers/net/wireless/strip.c
@@ -1125,7 +1125,7 @@ static int strip_seq_show(struct seq_file *seq, void *v)
1125} 1125}
1126 1126
1127 1127
1128static struct seq_operations strip_seq_ops = { 1128static const struct seq_operations strip_seq_ops = {
1129 .start = strip_seq_start, 1129 .start = strip_seq_start,
1130 .next = strip_seq_next, 1130 .next = strip_seq_next,
1131 .stop = strip_seq_stop, 1131 .stop = strip_seq_stop,
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index cd6184ee08ee..9f102a6535c4 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -196,7 +196,7 @@ static void rx_refill_timeout(unsigned long data)
196{ 196{
197 struct net_device *dev = (struct net_device *)data; 197 struct net_device *dev = (struct net_device *)data;
198 struct netfront_info *np = netdev_priv(dev); 198 struct netfront_info *np = netdev_priv(dev);
199 netif_rx_schedule(&np->napi); 199 napi_schedule(&np->napi);
200} 200}
201 201
202static int netfront_tx_slot_available(struct netfront_info *np) 202static int netfront_tx_slot_available(struct netfront_info *np)
@@ -328,7 +328,7 @@ static int xennet_open(struct net_device *dev)
328 xennet_alloc_rx_buffers(dev); 328 xennet_alloc_rx_buffers(dev);
329 np->rx.sring->rsp_event = np->rx.rsp_cons + 1; 329 np->rx.sring->rsp_event = np->rx.rsp_cons + 1;
330 if (RING_HAS_UNCONSUMED_RESPONSES(&np->rx)) 330 if (RING_HAS_UNCONSUMED_RESPONSES(&np->rx))
331 netif_rx_schedule(&np->napi); 331 napi_schedule(&np->napi);
332 } 332 }
333 spin_unlock_bh(&np->rx_lock); 333 spin_unlock_bh(&np->rx_lock);
334 334
@@ -979,7 +979,7 @@ err:
979 979
980 RING_FINAL_CHECK_FOR_RESPONSES(&np->rx, more_to_do); 980 RING_FINAL_CHECK_FOR_RESPONSES(&np->rx, more_to_do);
981 if (!more_to_do) 981 if (!more_to_do)
982 __netif_rx_complete(napi); 982 __napi_complete(napi);
983 983
984 local_irq_restore(flags); 984 local_irq_restore(flags);
985 } 985 }
@@ -1317,7 +1317,7 @@ static irqreturn_t xennet_interrupt(int irq, void *dev_id)
1317 xennet_tx_buf_gc(dev); 1317 xennet_tx_buf_gc(dev);
1318 /* Under tx_lock: protects access to rx shared-ring indexes. */ 1318 /* Under tx_lock: protects access to rx shared-ring indexes. */
1319 if (RING_HAS_UNCONSUMED_RESPONSES(&np->rx)) 1319 if (RING_HAS_UNCONSUMED_RESPONSES(&np->rx))
1320 netif_rx_schedule(&np->napi); 1320 napi_schedule(&np->napi);
1321 } 1321 }
1322 1322
1323 spin_unlock_irqrestore(&np->tx_lock, flags); 1323 spin_unlock_irqrestore(&np->tx_lock, flags);
diff --git a/drivers/net/znet.c b/drivers/net/znet.c
index f0b15c9347d0..0a6992d8611b 100644
--- a/drivers/net/znet.c
+++ b/drivers/net/znet.c
@@ -358,6 +358,17 @@ static void znet_set_multicast_list (struct net_device *dev)
358 * multicast address configured isn't equal to IFF_ALLMULTI */ 358 * multicast address configured isn't equal to IFF_ALLMULTI */
359} 359}
360 360
361static const struct net_device_ops znet_netdev_ops = {
362 .ndo_open = znet_open,
363 .ndo_stop = znet_close,
364 .ndo_start_xmit = znet_send_packet,
365 .ndo_set_multicast_list = znet_set_multicast_list,
366 .ndo_tx_timeout = znet_tx_timeout,
367 .ndo_change_mtu = eth_change_mtu,
368 .ndo_set_mac_address = eth_mac_addr,
369 .ndo_validate_addr = eth_validate_addr,
370};
371
361/* The Z-Note probe is pretty easy. The NETIDBLK exists in the safe-to-probe 372/* The Z-Note probe is pretty easy. The NETIDBLK exists in the safe-to-probe
362 BIOS area. We just scan for the signature, and pull the vital parameters 373 BIOS area. We just scan for the signature, and pull the vital parameters
363 out of the structure. */ 374 out of the structure. */
@@ -440,11 +451,7 @@ static int __init znet_probe (void)
440 znet->tx_end = znet->tx_start + znet->tx_buf_len; 451 znet->tx_end = znet->tx_start + znet->tx_buf_len;
441 452
442 /* The ZNET-specific entries in the device structure. */ 453 /* The ZNET-specific entries in the device structure. */
443 dev->open = &znet_open; 454 dev->netdev_ops = &znet_netdev_ops;
444 dev->hard_start_xmit = &znet_send_packet;
445 dev->stop = &znet_close;
446 dev->set_multicast_list = &znet_set_multicast_list;
447 dev->tx_timeout = znet_tx_timeout;
448 dev->watchdog_timeo = TX_TIMEOUT; 455 dev->watchdog_timeo = TX_TIMEOUT;
449 err = register_netdev(dev); 456 err = register_netdev(dev);
450 if (err) 457 if (err)