Diffstat (limited to 'drivers/net')
-rw-r--r--  drivers/net/8139too.c | 92
-rw-r--r--  drivers/net/8390.c | 10
-rw-r--r--  drivers/net/8390p.c | 19
-rw-r--r--  drivers/net/atl1e/atl1e_main.c | 3
-rw-r--r--  drivers/net/atlx/atl1.c | 7
-rw-r--r--  drivers/net/benet/be_main.c | 28
-rw-r--r--  drivers/net/bmac.c | 16
-rw-r--r--  drivers/net/chelsio/sge.c | 4
-rw-r--r--  drivers/net/cpmac.c | 23
-rw-r--r--  drivers/net/cxgb3/adapter.h | 2
-rw-r--r--  drivers/net/cxgb3/sge.c | 53
-rw-r--r--  drivers/net/dm9000.c | 30
-rw-r--r--  drivers/net/e1000/e1000_main.c | 13
-rw-r--r--  drivers/net/e1000e/netdev.c | 17
-rw-r--r--  drivers/net/fec.c | 897
-rw-r--r--  drivers/net/fec.h | 127
-rw-r--r--  drivers/net/fs_enet/mii-fec.c | 6
-rw-r--r--  drivers/net/hplance.c | 21
-rw-r--r--  drivers/net/ibmveth.c | 26
-rw-r--r--  drivers/net/igb/igb_main.c | 13
-rw-r--r--  drivers/net/irda/au1k_ir.c | 18
-rw-r--r--  drivers/net/irda/pxaficp_ir.c | 16
-rw-r--r--  drivers/net/irda/sa1100_ir.c | 18
-rw-r--r--  drivers/net/iseries_veth.c | 17
-rw-r--r--  drivers/net/ixgb/ixgb_main.c | 13
-rw-r--r--  drivers/net/ixgb/ixgb_osdep.h | 2
-rw-r--r--  drivers/net/ixgbe/ixgbe_82598.c | 186
-rw-r--r--  drivers/net/ixgbe/ixgbe_82599.c | 249
-rw-r--r--  drivers/net/ixgbe/ixgbe_common.c | 9
-rw-r--r--  drivers/net/ixgbe/ixgbe_main.c | 18
-rw-r--r--  drivers/net/ixgbe/ixgbe_phy.c | 19
-rw-r--r--  drivers/net/ixgbe/ixgbe_phy.h | 1
-rw-r--r--  drivers/net/ixgbe/ixgbe_type.h | 18
-rw-r--r--  drivers/net/ixp2000/ixpdev.c | 19
-rw-r--r--  drivers/net/jazzsonic.c | 19
-rw-r--r--  drivers/net/korina.c | 25
-rw-r--r--  drivers/net/lib82596.c | 23
-rw-r--r--  drivers/net/loopback.c | 21
-rw-r--r--  drivers/net/mace.c | 16
-rw-r--r--  drivers/net/macmace.c | 18
-rw-r--r--  drivers/net/macvlan.c | 22
-rw-r--r--  drivers/net/meth.c | 28
-rw-r--r--  drivers/net/mipsnet.c | 15
-rw-r--r--  drivers/net/mvme147.c | 17
-rw-r--r--  drivers/net/myri10ge/myri10ge.c | 38
-rw-r--r--  drivers/net/netx-eth.c | 17
-rw-r--r--  drivers/net/netxen/netxen_nic.h | 594
-rw-r--r--  drivers/net/netxen/netxen_nic_ctx.c | 204
-rw-r--r--  drivers/net/netxen/netxen_nic_ethtool.c | 105
-rw-r--r--  drivers/net/netxen/netxen_nic_hdr.h | 8
-rw-r--r--  drivers/net/netxen/netxen_nic_hw.c | 393
-rw-r--r--  drivers/net/netxen/netxen_nic_hw.h | 66
-rw-r--r--  drivers/net/netxen/netxen_nic_init.c | 291
-rw-r--r--  drivers/net/netxen/netxen_nic_main.c | 268
-rw-r--r--  drivers/net/netxen/netxen_nic_niu.c | 203
-rw-r--r--  drivers/net/netxen/netxen_nic_phan_reg.h | 27
-rw-r--r--  drivers/net/pasemi_mac.c | 23
-rw-r--r--  drivers/net/pci-skeleton.c | 19
-rw-r--r--  drivers/net/pppol2tp.c | 3
-rw-r--r--  drivers/net/r8169.c | 61
-rw-r--r--  drivers/net/rionet.c | 14
-rw-r--r--  drivers/net/sb1250-mac.c | 29
-rw-r--r--  drivers/net/sfc/boards.c | 2
-rw-r--r--  drivers/net/sfc/rx.c | 26
-rw-r--r--  drivers/net/sfc/sfe4001.c | 3
-rw-r--r--  drivers/net/sgiseeq.c | 18
-rw-r--r--  drivers/net/smc911x.c | 23
-rw-r--r--  drivers/net/sun3lance.c | 17
-rw-r--r--  drivers/net/tg3.c | 89
-rw-r--r--  drivers/net/tg3.h | 6
-rw-r--r--  drivers/net/tulip/winbond-840.c | 3
-rw-r--r--  drivers/net/tun.c | 24
-rw-r--r--  drivers/net/ucc_geth.c | 31
-rw-r--r--  drivers/net/ucc_geth.h | 1
-rw-r--r--  drivers/net/usb/Kconfig | 8
-rw-r--r--  drivers/net/usb/Makefile | 1
-rw-r--r--  drivers/net/usb/cdc_ether.c | 33
-rw-r--r--  drivers/net/usb/int51x1.c | 253
-rw-r--r--  drivers/net/usb/kaweth.c | 33
-rw-r--r--  drivers/net/usb/usbnet.c | 37
-rw-r--r--  drivers/net/virtio_net.c | 12
-rw-r--r--  drivers/net/wan/pc300_drv.c | 20
-rw-r--r--  drivers/net/wireless/at76c50x-usb.c | 3
-rw-r--r--  drivers/net/wireless/ath5k/reset.c | 5
-rw-r--r--  drivers/net/wireless/ath9k/ath9k.h | 6
-rw-r--r--  drivers/net/wireless/ipw2x00/ipw2200.c | 7
-rw-r--r--  drivers/net/wireless/ipw2x00/libipw_module.c | 4
-rw-r--r--  drivers/net/wireless/libertas/if_usb.c | 3
-rw-r--r--  drivers/net/wireless/libertas_tf/if_usb.c | 3
89 files changed, 2848 insertions, 2400 deletions
diff --git a/drivers/net/8139too.c b/drivers/net/8139too.c
index 1fc45431a620..d90177509bf6 100644
--- a/drivers/net/8139too.c
+++ b/drivers/net/8139too.c
@@ -2292,11 +2292,11 @@ static int rtl8139_close (struct net_device *dev)
    other threads or interrupts aren't messing with the 8139. */
 static void rtl8139_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
 {
-	struct rtl8139_private *np = netdev_priv(dev);
-	void __iomem *ioaddr = np->mmio_addr;
+	struct rtl8139_private *tp = netdev_priv(dev);
+	void __iomem *ioaddr = tp->mmio_addr;

-	spin_lock_irq(&np->lock);
-	if (rtl_chip_info[np->chipset].flags & HasLWake) {
+	spin_lock_irq(&tp->lock);
+	if (rtl_chip_info[tp->chipset].flags & HasLWake) {
 		u8 cfg3 = RTL_R8 (Config3);
 		u8 cfg5 = RTL_R8 (Config5);

@@ -2317,7 +2317,7 @@ static void rtl8139_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
 		if (cfg5 & Cfg5_BWF)
 			wol->wolopts |= WAKE_BCAST;
 	}
-	spin_unlock_irq(&np->lock);
+	spin_unlock_irq(&tp->lock);
 }


@@ -2326,19 +2326,19 @@ static void rtl8139_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
    aren't messing with the 8139. */
 static int rtl8139_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
 {
-	struct rtl8139_private *np = netdev_priv(dev);
-	void __iomem *ioaddr = np->mmio_addr;
+	struct rtl8139_private *tp = netdev_priv(dev);
+	void __iomem *ioaddr = tp->mmio_addr;
 	u32 support;
 	u8 cfg3, cfg5;

-	support = ((rtl_chip_info[np->chipset].flags & HasLWake)
+	support = ((rtl_chip_info[tp->chipset].flags & HasLWake)
 		   ? (WAKE_PHY | WAKE_MAGIC
 		      | WAKE_UCAST | WAKE_MCAST | WAKE_BCAST)
 		   : 0);
 	if (wol->wolopts & ~support)
 		return -EINVAL;

-	spin_lock_irq(&np->lock);
+	spin_lock_irq(&tp->lock);
 	cfg3 = RTL_R8 (Config3) & ~(Cfg3_LinkUp | Cfg3_Magic);
 	if (wol->wolopts & WAKE_PHY)
 		cfg3 |= Cfg3_LinkUp;
@@ -2359,87 +2359,87 @@ static int rtl8139_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
 	if (wol->wolopts & WAKE_BCAST)
 		cfg5 |= Cfg5_BWF;
 	RTL_W8 (Config5, cfg5);	/* need not unlock via Cfg9346 */
-	spin_unlock_irq(&np->lock);
+	spin_unlock_irq(&tp->lock);

 	return 0;
 }

 static void rtl8139_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
 {
-	struct rtl8139_private *np = netdev_priv(dev);
+	struct rtl8139_private *tp = netdev_priv(dev);
 	strcpy(info->driver, DRV_NAME);
 	strcpy(info->version, DRV_VERSION);
-	strcpy(info->bus_info, pci_name(np->pci_dev));
-	info->regdump_len = np->regs_len;
+	strcpy(info->bus_info, pci_name(tp->pci_dev));
+	info->regdump_len = tp->regs_len;
 }

 static int rtl8139_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
 {
-	struct rtl8139_private *np = netdev_priv(dev);
-	spin_lock_irq(&np->lock);
-	mii_ethtool_gset(&np->mii, cmd);
-	spin_unlock_irq(&np->lock);
+	struct rtl8139_private *tp = netdev_priv(dev);
+	spin_lock_irq(&tp->lock);
+	mii_ethtool_gset(&tp->mii, cmd);
+	spin_unlock_irq(&tp->lock);
 	return 0;
 }

 static int rtl8139_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
 {
-	struct rtl8139_private *np = netdev_priv(dev);
+	struct rtl8139_private *tp = netdev_priv(dev);
 	int rc;
-	spin_lock_irq(&np->lock);
-	rc = mii_ethtool_sset(&np->mii, cmd);
-	spin_unlock_irq(&np->lock);
+	spin_lock_irq(&tp->lock);
+	rc = mii_ethtool_sset(&tp->mii, cmd);
+	spin_unlock_irq(&tp->lock);
 	return rc;
 }

 static int rtl8139_nway_reset(struct net_device *dev)
 {
-	struct rtl8139_private *np = netdev_priv(dev);
-	return mii_nway_restart(&np->mii);
+	struct rtl8139_private *tp = netdev_priv(dev);
+	return mii_nway_restart(&tp->mii);
 }

 static u32 rtl8139_get_link(struct net_device *dev)
 {
-	struct rtl8139_private *np = netdev_priv(dev);
-	return mii_link_ok(&np->mii);
+	struct rtl8139_private *tp = netdev_priv(dev);
+	return mii_link_ok(&tp->mii);
 }

 static u32 rtl8139_get_msglevel(struct net_device *dev)
 {
-	struct rtl8139_private *np = netdev_priv(dev);
-	return np->msg_enable;
+	struct rtl8139_private *tp = netdev_priv(dev);
+	return tp->msg_enable;
 }

 static void rtl8139_set_msglevel(struct net_device *dev, u32 datum)
 {
-	struct rtl8139_private *np = netdev_priv(dev);
-	np->msg_enable = datum;
+	struct rtl8139_private *tp = netdev_priv(dev);
+	tp->msg_enable = datum;
 }

 static int rtl8139_get_regs_len(struct net_device *dev)
 {
-	struct rtl8139_private *np;
+	struct rtl8139_private *tp;
 	/* TODO: we are too slack to do reg dumping for pio, for now */
 	if (use_io)
 		return 0;
-	np = netdev_priv(dev);
-	return np->regs_len;
+	tp = netdev_priv(dev);
+	return tp->regs_len;
 }

 static void rtl8139_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *regbuf)
 {
-	struct rtl8139_private *np;
+	struct rtl8139_private *tp;

 	/* TODO: we are too slack to do reg dumping for pio, for now */
 	if (use_io)
 		return;
-	np = netdev_priv(dev);
+	tp = netdev_priv(dev);

 	regs->version = RTL_REGS_VER;

-	spin_lock_irq(&np->lock);
-	memcpy_fromio(regbuf, np->mmio_addr, regs->len);
-	spin_unlock_irq(&np->lock);
+	spin_lock_irq(&tp->lock);
+	memcpy_fromio(regbuf, tp->mmio_addr, regs->len);
+	spin_unlock_irq(&tp->lock);
 }

 static int rtl8139_get_sset_count(struct net_device *dev, int sset)
@@ -2454,12 +2454,12 @@ static int rtl8139_get_sset_count(struct net_device *dev, int sset)

 static void rtl8139_get_ethtool_stats(struct net_device *dev, struct ethtool_stats *stats, u64 *data)
 {
-	struct rtl8139_private *np = netdev_priv(dev);
+	struct rtl8139_private *tp = netdev_priv(dev);

-	data[0] = np->xstats.early_rx;
-	data[1] = np->xstats.tx_buf_mapped;
-	data[2] = np->xstats.tx_timeouts;
-	data[3] = np->xstats.rx_lost_in_ring;
+	data[0] = tp->xstats.early_rx;
+	data[1] = tp->xstats.tx_buf_mapped;
+	data[2] = tp->xstats.tx_timeouts;
+	data[3] = tp->xstats.rx_lost_in_ring;
 }

 static void rtl8139_get_strings(struct net_device *dev, u32 stringset, u8 *data)
@@ -2486,15 +2486,15 @@ static const struct ethtool_ops rtl8139_ethtool_ops = {

 static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
 {
-	struct rtl8139_private *np = netdev_priv(dev);
+	struct rtl8139_private *tp = netdev_priv(dev);
 	int rc;

 	if (!netif_running(dev))
 		return -EINVAL;

-	spin_lock_irq(&np->lock);
-	rc = generic_mii_ioctl(&np->mii, if_mii(rq), cmd, NULL);
-	spin_unlock_irq(&np->lock);
+	spin_lock_irq(&tp->lock);
+	rc = generic_mii_ioctl(&tp->mii, if_mii(rq), cmd, NULL);
+	spin_unlock_irq(&tp->lock);

 	return rc;
 }
diff --git a/drivers/net/8390.c b/drivers/net/8390.c
index ec3e22e6306f..21153dea8ebe 100644
--- a/drivers/net/8390.c
+++ b/drivers/net/8390.c
@@ -74,14 +74,8 @@ EXPORT_SYMBOL(ei_netdev_ops);
 struct net_device *__alloc_ei_netdev(int size)
 {
 	struct net_device *dev = ____alloc_ei_netdev(size);
-#ifdef CONFIG_COMPAT_NET_DEV_OPS
-	if (dev) {
-		dev->hard_start_xmit = ei_start_xmit;
-		dev->get_stats = ei_get_stats;
-		dev->set_multicast_list = ei_set_multicast_list;
-		dev->tx_timeout = ei_tx_timeout;
-	}
-#endif
+	if (dev)
+		dev->netdev_ops = &ei_netdev_ops;
 	return dev;
 }
 EXPORT_SYMBOL(__alloc_ei_netdev);
diff --git a/drivers/net/8390p.c b/drivers/net/8390p.c
index da863c91d1d0..cacdd86a27d0 100644
--- a/drivers/net/8390p.c
+++ b/drivers/net/8390p.c
@@ -79,14 +79,8 @@ EXPORT_SYMBOL(eip_netdev_ops);
 struct net_device *__alloc_eip_netdev(int size)
 {
 	struct net_device *dev = ____alloc_ei_netdev(size);
-#ifdef CONFIG_COMPAT_NET_DEV_OPS
-	if (dev) {
-		dev->hard_start_xmit = eip_start_xmit;
-		dev->get_stats = eip_get_stats;
-		dev->set_multicast_list = eip_set_multicast_list;
-		dev->tx_timeout = eip_tx_timeout;
-	}
-#endif
+	if (dev)
+		dev->netdev_ops = &eip_netdev_ops;
 	return dev;
 }
 EXPORT_SYMBOL(__alloc_eip_netdev);
@@ -97,16 +91,15 @@ void NS8390p_init(struct net_device *dev, int startp)
 }
 EXPORT_SYMBOL(NS8390p_init);

-#if defined(MODULE)
-
-int init_module(void)
+static int __init 8390p_init_module(void)
 {
 	return 0;
 }

-void cleanup_module(void)
+static void __exit 8390p_cleanup_module(void)
 {
 }

-#endif /* MODULE */
+module_init(8390p_init_module);
+module_exit(8390p_cleanup_module);
 MODULE_LICENSE("GPL");
diff --git a/drivers/net/atl1e/atl1e_main.c b/drivers/net/atl1e/atl1e_main.c
index fb57b750866b..adac06195c8f 100644
--- a/drivers/net/atl1e/atl1e_main.c
+++ b/drivers/net/atl1e/atl1e_main.c
@@ -1794,8 +1794,7 @@ static void atl1e_tx_map(struct atl1e_adapter *adapter,
 		memcpy(use_tpd, tpd, sizeof(struct atl1e_tpd_desc));

 		tx_buffer = atl1e_get_tx_buffer(adapter, use_tpd);
-		if (tx_buffer->skb)
-			BUG();
+		BUG_ON(tx_buffer->skb);

 		tx_buffer->skb = NULL;
 		tx_buffer->length =
diff --git a/drivers/net/atlx/atl1.c b/drivers/net/atlx/atl1.c
index 0ab22540bf59..13f0bdc32449 100644
--- a/drivers/net/atlx/atl1.c
+++ b/drivers/net/atlx/atl1.c
@@ -2207,8 +2207,7 @@ static void atl1_tx_map(struct atl1_adapter *adapter, struct sk_buff *skb,
 	nr_frags = skb_shinfo(skb)->nr_frags;
 	next_to_use = atomic_read(&tpd_ring->next_to_use);
 	buffer_info = &tpd_ring->buffer_info[next_to_use];
-	if (unlikely(buffer_info->skb))
-		BUG();
+	BUG_ON(buffer_info->skb);
 	/* put skb in last TPD */
 	buffer_info->skb = NULL;

@@ -2274,8 +2273,8 @@ static void atl1_tx_map(struct atl1_adapter *adapter, struct sk_buff *skb,
 				ATL1_MAX_TX_BUF_LEN;
 		for (i = 0; i < nseg; i++) {
 			buffer_info = &tpd_ring->buffer_info[next_to_use];
-			if (unlikely(buffer_info->skb))
-				BUG();
+			BUG_ON(buffer_info->skb);
+
 			buffer_info->skb = NULL;
 			buffer_info->length = (buf_len > ATL1_MAX_TX_BUF_LEN) ?
 				ATL1_MAX_TX_BUF_LEN : buf_len;
diff --git a/drivers/net/benet/be_main.c b/drivers/net/benet/be_main.c
index 9b75aa630062..8994b03d80ac 100644
--- a/drivers/net/benet/be_main.c
+++ b/drivers/net/benet/be_main.c
@@ -637,6 +637,22 @@ static void be_rx_stats_update(struct be_adapter *adapter,
 	stats->be_rx_bytes += pktsize;
 }

+static inline bool do_pkt_csum(struct be_eth_rx_compl *rxcp, bool cso)
+{
+	u8 l4_cksm, ip_version, ipcksm, tcpf = 0, udpf = 0, ipv6_chk;
+
+	l4_cksm = AMAP_GET_BITS(struct amap_eth_rx_compl, l4_cksm, rxcp);
+	ipcksm = AMAP_GET_BITS(struct amap_eth_rx_compl, ipcksm, rxcp);
+	ip_version = AMAP_GET_BITS(struct amap_eth_rx_compl, ip_version, rxcp);
+	if (ip_version) {
+		tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl, tcpf, rxcp);
+		udpf = AMAP_GET_BITS(struct amap_eth_rx_compl, udpf, rxcp);
+	}
+	ipv6_chk = (ip_version && (tcpf || udpf));
+
+	return ((l4_cksm && ipv6_chk && ipcksm) && cso) ? false : true;
+}
+
 static struct be_rx_page_info *
 get_rx_page_info(struct be_adapter *adapter, u16 frag_idx)
 {
@@ -752,9 +768,7 @@ static void be_rx_compl_process(struct be_adapter *adapter,
 {
 	struct sk_buff *skb;
 	u32 vtp, vid;
-	int l4_cksm;

-	l4_cksm = AMAP_GET_BITS(struct amap_eth_rx_compl, l4_cksm, rxcp);
 	vtp = AMAP_GET_BITS(struct amap_eth_rx_compl, vtp, rxcp);

 	skb = netdev_alloc_skb(adapter->netdev, BE_HDR_LEN + NET_IP_ALIGN);
@@ -769,10 +783,10 @@ static void be_rx_compl_process(struct be_adapter *adapter,

 	skb_fill_rx_data(adapter, skb, rxcp);

-	if (l4_cksm && adapter->rx_csum)
-		skb->ip_summed = CHECKSUM_UNNECESSARY;
-	else
+	if (do_pkt_csum(rxcp, adapter->rx_csum))
 		skb->ip_summed = CHECKSUM_NONE;
+	else
+		skb->ip_summed = CHECKSUM_UNNECESSARY;

 	skb->truesize = skb->len + sizeof(struct sk_buff);
 	skb->protocol = eth_type_trans(skb, adapter->netdev);
@@ -1626,10 +1640,12 @@ static void be_netdev_init(struct net_device *netdev)

 	netdev->features |= NETIF_F_SG | NETIF_F_HW_VLAN_RX | NETIF_F_TSO |
 		NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_FILTER | NETIF_F_IP_CSUM |
-		NETIF_F_IPV6_CSUM | NETIF_F_TSO6;
+		NETIF_F_IPV6_CSUM;

 	netdev->flags |= IFF_MULTICAST;

+	adapter->rx_csum = true;
+
 	BE_SET_NETDEV_OPS(netdev, &be_netdev_ops);

 	SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);
diff --git a/drivers/net/bmac.c b/drivers/net/bmac.c
index 44d015f70d1c..9578a3dfac01 100644
--- a/drivers/net/bmac.c
+++ b/drivers/net/bmac.c
@@ -1247,6 +1247,16 @@ static const struct ethtool_ops bmac_ethtool_ops = {
 	.get_link		= ethtool_op_get_link,
 };

+static const struct net_device_ops bmac_netdev_ops = {
+	.ndo_open		= bmac_open,
+	.ndo_stop		= bmac_close,
+	.ndo_start_xmit		= bmac_output,
+	.ndo_set_multicast_list	= bmac_set_multicast,
+	.ndo_set_mac_address	= bmac_set_address,
+	.ndo_change_mtu		= eth_change_mtu,
+	.ndo_validate_addr	= eth_validate_addr,
+};
+
 static int __devinit bmac_probe(struct macio_dev *mdev, const struct of_device_id *match)
 {
 	int j, rev, ret;
@@ -1308,12 +1318,8 @@ static int __devinit bmac_probe(struct macio_dev *mdev, const struct of_device_i
 	bmac_enable_and_reset_chip(dev);
 	bmwrite(dev, INTDISABLE, DisableAll);

-	dev->open = bmac_open;
-	dev->stop = bmac_close;
+	dev->netdev_ops = &bmac_netdev_ops;
 	dev->ethtool_ops = &bmac_ethtool_ops;
-	dev->hard_start_xmit = bmac_output;
-	dev->set_multicast_list = bmac_set_multicast;
-	dev->set_mac_address = bmac_set_address;

 	bmac_get_station_address(dev, addr);
 	if (bmac_verify_checksum(dev) != 0)
diff --git a/drivers/net/chelsio/sge.c b/drivers/net/chelsio/sge.c
index 58f6fc055f6a..5e97a1a71d88 100644
--- a/drivers/net/chelsio/sge.c
+++ b/drivers/net/chelsio/sge.c
@@ -1149,8 +1149,8 @@ static inline void write_tx_desc(struct cmdQ_e *e, dma_addr_t mapping,
 				 unsigned int len, unsigned int gen,
 				 unsigned int eop)
 {
-	if (unlikely(len > SGE_TX_DESC_MAX_PLEN))
-		BUG();
+	BUG_ON(len > SGE_TX_DESC_MAX_PLEN);
+
 	e->addr_lo = (u32)mapping;
 	e->addr_hi = (u64)mapping >> 32;
 	e->len_gen = V_CMD_LEN(len) | V_CMD_GEN1(gen);
diff --git a/drivers/net/cpmac.c b/drivers/net/cpmac.c
index 3f476c7c0736..af305c0b34d1 100644
--- a/drivers/net/cpmac.c
+++ b/drivers/net/cpmac.c
@@ -1093,6 +1093,19 @@ static int cpmac_stop(struct net_device *dev)
 	return 0;
 }

+static const struct net_device_ops cpmac_netdev_ops = {
+	.ndo_open		= cpmac_open,
+	.ndo_stop		= cpmac_stop,
+	.ndo_start_xmit		= cpmac_start_xmit,
+	.ndo_tx_timeout		= cpmac_tx_timeout,
+	.ndo_set_multicast_list	= cpmac_set_multicast_list,
+	.ndo_do_ioctl		= cpmac_ioctl,
+	.ndo_set_config		= cpmac_config,
+	.ndo_change_mtu		= eth_change_mtu,
+	.ndo_validate_addr	= eth_validate_addr,
+	.ndo_set_mac_address	= eth_mac_addr,
+};
+
 static int external_switch;

 static int __devinit cpmac_probe(struct platform_device *pdev)
@@ -1143,14 +1156,8 @@ static int __devinit cpmac_probe(struct platform_device *pdev)

 	dev->irq = platform_get_irq_byname(pdev, "irq");

-	dev->open = cpmac_open;
-	dev->stop = cpmac_stop;
-	dev->set_config = cpmac_config;
-	dev->hard_start_xmit = cpmac_start_xmit;
-	dev->do_ioctl = cpmac_ioctl;
-	dev->set_multicast_list = cpmac_set_multicast_list;
-	dev->tx_timeout = cpmac_tx_timeout;
-	dev->ethtool_ops = &cpmac_ethtool_ops;
+	dev->netdev_ops = &cpmac_netdev_ops;
+	dev->ethtool_ops = &cpmac_ethtool_ops;

 	netif_napi_add(dev, &priv->napi, cpmac_poll, 64);

diff --git a/drivers/net/cxgb3/adapter.h b/drivers/net/cxgb3/adapter.h
index 714df2b675e6..322434ac42fc 100644
--- a/drivers/net/cxgb3/adapter.h
+++ b/drivers/net/cxgb3/adapter.h
@@ -195,7 +195,7 @@ struct sge_qset { /* an SGE queue set */
 	struct sge_rspq rspq;
 	struct sge_fl fl[SGE_RXQ_PER_SET];
 	struct sge_txq txq[SGE_TXQ_PER_SET];
-	struct napi_gro_fraginfo lro_frag_tbl;
+	int nomem;
 	int lro_enabled;
 	void *lro_va;
 	struct net_device *netdev;
diff --git a/drivers/net/cxgb3/sge.c b/drivers/net/cxgb3/sge.c
index 26d3587f3399..73d569e758ec 100644
--- a/drivers/net/cxgb3/sge.c
+++ b/drivers/net/cxgb3/sge.c
@@ -654,7 +654,8 @@ static void t3_reset_qset(struct sge_qset *q)
 	q->txq_stopped = 0;
 	q->tx_reclaim_timer.function = NULL; /* for t3_stop_sge_timers() */
 	q->rx_reclaim_timer.function = NULL;
-	q->lro_frag_tbl.nr_frags = q->lro_frag_tbl.len = 0;
+	q->nomem = 0;
+	napi_free_frags(&q->napi);
 }


@@ -2074,20 +2075,19 @@ static void lro_add_page(struct adapter *adap, struct sge_qset *qs,
 			 struct sge_fl *fl, int len, int complete)
 {
 	struct rx_sw_desc *sd = &fl->sdesc[fl->cidx];
+	struct sk_buff *skb = NULL;
 	struct cpl_rx_pkt *cpl;
-	struct skb_frag_struct *rx_frag = qs->lro_frag_tbl.frags;
-	int nr_frags = qs->lro_frag_tbl.nr_frags;
-	int frag_len = qs->lro_frag_tbl.len;
+	struct skb_frag_struct *rx_frag;
+	int nr_frags;
 	int offset = 0;

-	if (!nr_frags) {
-		offset = 2 + sizeof(struct cpl_rx_pkt);
-		qs->lro_va = cpl = sd->pg_chunk.va + 2;
+	if (!qs->nomem) {
+		skb = napi_get_frags(&qs->napi);
+		qs->nomem = !skb;
 	}

 	fl->credits--;

-	len -= offset;
 	pci_dma_sync_single_for_cpu(adap->pdev,
 				    pci_unmap_addr(sd, dma_addr),
 				    fl->buf_size - SGE_PG_RSVD,
@@ -2100,21 +2100,38 @@ static void lro_add_page(struct adapter *adap, struct sge_qset *qs,
 				    fl->alloc_size,
 				    PCI_DMA_FROMDEVICE);

+	if (!skb) {
+		put_page(sd->pg_chunk.page);
+		if (complete)
+			qs->nomem = 0;
+		return;
+	}
+
+	rx_frag = skb_shinfo(skb)->frags;
+	nr_frags = skb_shinfo(skb)->nr_frags;
+
+	if (!nr_frags) {
+		offset = 2 + sizeof(struct cpl_rx_pkt);
+		qs->lro_va = sd->pg_chunk.va + 2;
+	}
+	len -= offset;
+
 	prefetch(qs->lro_va);

 	rx_frag += nr_frags;
 	rx_frag->page = sd->pg_chunk.page;
 	rx_frag->page_offset = sd->pg_chunk.offset + offset;
 	rx_frag->size = len;
-	frag_len += len;
-	qs->lro_frag_tbl.nr_frags++;
-	qs->lro_frag_tbl.len = frag_len;

+	skb->len += len;
+	skb->data_len += len;
+	skb->truesize += len;
+	skb_shinfo(skb)->nr_frags++;

 	if (!complete)
 		return;

-	qs->lro_frag_tbl.ip_summed = CHECKSUM_UNNECESSARY;
+	skb->ip_summed = CHECKSUM_UNNECESSARY;
 	cpl = qs->lro_va;

 	if (unlikely(cpl->vlan_valid)) {
@@ -2123,15 +2140,11 @@ static void lro_add_page(struct adapter *adap, struct sge_qset *qs,
 		struct vlan_group *grp = pi->vlan_grp;

 		if (likely(grp != NULL)) {
-			vlan_gro_frags(&qs->napi, grp, ntohs(cpl->vlan),
-				       &qs->lro_frag_tbl);
-			goto out;
+			vlan_gro_frags(&qs->napi, grp, ntohs(cpl->vlan));
+			return;
 		}
 	}
-	napi_gro_frags(&qs->napi, &qs->lro_frag_tbl);
-
-out:
-	qs->lro_frag_tbl.nr_frags = qs->lro_frag_tbl.len = 0;
+	napi_gro_frags(&qs->napi);
 }

 /**
@@ -2300,8 +2313,6 @@ no_mem:
 	if (fl->use_pages) {
 		void *addr = fl->sdesc[fl->cidx].pg_chunk.va;

-		prefetch(&qs->lro_frag_tbl);
-
 		prefetch(addr);
 #if L1_CACHE_BYTES < 128
 		prefetch(addr + L1_CACHE_BYTES);
diff --git a/drivers/net/dm9000.c b/drivers/net/dm9000.c
index d8350860c0f8..e402e91bf188 100644
--- a/drivers/net/dm9000.c
+++ b/drivers/net/dm9000.c
@@ -1170,6 +1170,21 @@ dm9000_stop(struct net_device *ndev)
 	return 0;
 }

+static const struct net_device_ops dm9000_netdev_ops = {
+	.ndo_open		= dm9000_open,
+	.ndo_stop		= dm9000_stop,
+	.ndo_start_xmit		= dm9000_start_xmit,
+	.ndo_tx_timeout		= dm9000_timeout,
+	.ndo_set_multicast_list	= dm9000_hash_table,
+	.ndo_do_ioctl		= dm9000_ioctl,
+	.ndo_change_mtu		= eth_change_mtu,
+	.ndo_validate_addr	= eth_validate_addr,
+	.ndo_set_mac_address	= eth_mac_addr,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+	.ndo_poll_controller	= dm9000_poll_controller,
+#endif
+};
+
 #define res_size(_r) (((_r)->end - (_r)->start) + 1)

 /*
@@ -1339,18 +1354,9 @@ dm9000_probe(struct platform_device *pdev)
 	/* driver system function */
 	ether_setup(ndev);

-	ndev->open = &dm9000_open;
-	ndev->hard_start_xmit = &dm9000_start_xmit;
-	ndev->tx_timeout = &dm9000_timeout;
-	ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
-	ndev->stop = &dm9000_stop;
-	ndev->set_multicast_list = &dm9000_hash_table;
-	ndev->ethtool_ops = &dm9000_ethtool_ops;
-	ndev->do_ioctl = &dm9000_ioctl;
-
-#ifdef CONFIG_NET_POLL_CONTROLLER
-	ndev->poll_controller = &dm9000_poll_controller;
-#endif
+	ndev->netdev_ops	= &dm9000_netdev_ops;
+	ndev->watchdog_timeo	= msecs_to_jiffies(watchdog);
+	ndev->ethtool_ops	= &dm9000_ethtool_ops;

 	db->msg_enable = NETIF_MSG_LINK;
 	db->mii.phy_id_mask = 0x1f;
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
index 6a46ceed9436..71d4fe15976a 100644
--- a/drivers/net/e1000/e1000_main.c
+++ b/drivers/net/e1000/e1000_main.c
@@ -498,6 +498,8 @@ int e1000_up(struct e1000_adapter *adapter)

 	e1000_irq_enable(adapter);

+	netif_wake_queue(adapter->netdev);
+
 	/* fire a link change interrupt to start the watchdog */
 	ew32(ICS, E1000_ICS_LSC);
 	return 0;
@@ -1234,15 +1236,14 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
 	    !e1000_check_mng_mode(hw))
 		e1000_get_hw_control(adapter);

-	/* tell the stack to leave us alone until e1000_open() is called */
-	netif_carrier_off(netdev);
-	netif_stop_queue(netdev);
-
 	strcpy(netdev->name, "eth%d");
 	err = register_netdev(netdev);
 	if (err)
 		goto err_register;

+	/* carrier off reporting is important to ethtool even BEFORE open */
+	netif_carrier_off(netdev);
+
 	DPRINTK(PROBE, INFO, "Intel(R) PRO/1000 Network Connection\n");

 	cards_found++;
@@ -1441,6 +1442,8 @@ static int e1000_open(struct net_device *netdev)
 	if (test_bit(__E1000_TESTING, &adapter->flags))
 		return -EBUSY;

+	netif_carrier_off(netdev);
+
 	/* allocate transmit descriptors */
 	err = e1000_setup_all_tx_resources(adapter);
 	if (err)
@@ -2590,7 +2593,6 @@ static void e1000_watchdog(unsigned long data)
 			ew32(TCTL, tctl);

 			netif_carrier_on(netdev);
-			netif_wake_queue(netdev);
 			mod_timer(&adapter->phy_info_timer, round_jiffies(jiffies + 2 * HZ));
 			adapter->smartspeed = 0;
 		} else {
@@ -2607,7 +2609,6 @@ static void e1000_watchdog(unsigned long data)
 			printk(KERN_INFO "e1000: %s NIC Link is Down\n",
 			       netdev->name);
 			netif_carrier_off(netdev);
-			netif_stop_queue(netdev);
 			mod_timer(&adapter->phy_info_timer, round_jiffies(jiffies + 2 * HZ));

 			/* 80003ES2LAN workaround--
diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c
index ca82f19a7ed1..da6b37e05bea 100644
--- a/drivers/net/e1000e/netdev.c
+++ b/drivers/net/e1000e/netdev.c
@@ -2826,6 +2826,8 @@ int e1000e_up(struct e1000_adapter *adapter)
 		e1000_configure_msix(adapter);
 	e1000_irq_enable(adapter);

+	netif_wake_queue(adapter->netdev);
+
 	/* fire a link change interrupt to start the watchdog */
 	ew32(ICS, E1000_ICS_LSC);
 	return 0;
@@ -2848,7 +2850,7 @@ void e1000e_down(struct e1000_adapter *adapter)
 	ew32(RCTL, rctl & ~E1000_RCTL_EN);
 	/* flush and sleep below */

-	netif_tx_stop_all_queues(netdev);
+	netif_stop_queue(netdev);

 	/* disable transmits in the hardware */
 	tctl = er32(TCTL);
@@ -3072,6 +3074,8 @@ static int e1000_open(struct net_device *netdev)
 	if (test_bit(__E1000_TESTING, &adapter->state))
 		return -EBUSY;

+	netif_carrier_off(netdev);
+
 	/* allocate transmit descriptors */
 	err = e1000e_setup_tx_resources(adapter);
 	if (err)
@@ -3128,7 +3132,7 @@ static int e1000_open(struct net_device *netdev)

 	e1000_irq_enable(adapter);

-	netif_tx_start_all_queues(netdev);
+	netif_start_queue(netdev);

 	/* fire a link status change interrupt to start the watchdog */
 	ew32(ICS, E1000_ICS_LSC);
@@ -3598,7 +3602,6 @@ static void e1000_watchdog_task(struct work_struct *work)
 				phy->ops.cfg_on_link_up(hw);

 			netif_carrier_on(netdev);
-			netif_tx_wake_all_queues(netdev);

 			if (!test_bit(__E1000_DOWN, &adapter->state))
 				mod_timer(&adapter->phy_info_timer,
@@ -3612,7 +3615,6 @@ static void e1000_watchdog_task(struct work_struct *work)
 			printk(KERN_INFO "e1000e: %s NIC Link is Down\n",
 			       adapter->netdev->name);
 			netif_carrier_off(netdev);
-			netif_tx_stop_all_queues(netdev);
 			if (!test_bit(__E1000_DOWN, &adapter->state))
 				mod_timer(&adapter->phy_info_timer,
 					  round_jiffies(jiffies + 2 * HZ));
@@ -5037,15 +5039,14 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
 	if (!(adapter->flags & FLAG_HAS_AMT))
 		e1000_get_hw_control(adapter);

-	/* tell the stack to leave us alone until e1000_open() is called */
-	netif_carrier_off(netdev);
-	netif_tx_stop_all_queues(netdev);
-
 	strcpy(netdev->name, "eth%d");
 	err = register_netdev(netdev);
 	if (err)
 		goto err_register;

+	/* carrier off reporting is important to ethtool even BEFORE open */
+	netif_carrier_off(netdev);
+
 	e1000_print_device_info(adapter);

 	return 0;
diff --git a/drivers/net/fec.c b/drivers/net/fec.c
index 682e7f0b5581..28db6919c526 100644
--- a/drivers/net/fec.c
+++ b/drivers/net/fec.c
@@ -86,8 +86,7 @@ static unsigned char fec_mac_default[] = {
 #endif
 #endif /* CONFIG_M5272 */

-/* Forward declarations of some structures to support different PHYs
-*/
+/* Forward declarations of some structures to support different PHYs */

 typedef struct {
 	uint mii_data;
@@ -123,8 +122,7 @@ typedef struct {
 #error "FEC: descriptor ring size constants too large"
 #endif

-/* Interrupt events/masks.
-*/
+/* Interrupt events/masks. */
 #define FEC_ENET_HBERR	((uint)0x80000000)	/* Heartbeat error */
 #define FEC_ENET_BABR	((uint)0x40000000)	/* Babbling receiver */
 #define FEC_ENET_BABT	((uint)0x20000000)	/* Babbling transmitter */
@@ -165,7 +163,7 @@ typedef struct {
  */
 struct fec_enet_private {
 	/* Hardware registers of the FEC device */
-	volatile fec_t *hwp;
+	void __iomem *hwp;

 	struct net_device *netdev;

@@ -174,16 +172,20 @@ struct fec_enet_private {
 	/* The saved address of a sent-in-place packet/buffer, for skfree(). */
 	unsigned char *tx_bounce[TX_RING_SIZE];
 	struct sk_buff* tx_skbuff[TX_RING_SIZE];
+	struct sk_buff* rx_skbuff[RX_RING_SIZE];
 	ushort skb_cur;
 	ushort skb_dirty;

-	/* CPM dual port RAM relative addresses.
-	*/
+	/* CPM dual port RAM relative addresses */
 	dma_addr_t bd_dma;
-	cbd_t *rx_bd_base;		/* Address of Rx and Tx buffers. */
-	cbd_t *tx_bd_base;
-	cbd_t *cur_rx, *cur_tx;		/* The next free ring entry */
-	cbd_t *dirty_tx;	/* The ring entries to be free()ed. */
+	/* Address of Rx and Tx buffers */
+	struct bufdesc *rx_bd_base;
+	struct bufdesc *tx_bd_base;
+	/* The next free ring entry */
+	struct bufdesc *cur_rx, *cur_tx;
+	/* The ring entries to be free()ed */
+	struct bufdesc *dirty_tx;
+
 	uint tx_full;
 	/* hold while accessing the HW like ringbuffer for tx/rx but not MAC */
 	spinlock_t hw_lock;
@@ -209,17 +211,13 @@ struct fec_enet_private {
 	int full_duplex;
 };

-static int fec_enet_open(struct net_device *dev);
-static int fec_enet_start_xmit(struct sk_buff *skb, struct net_device *dev);
 static void fec_enet_mii(struct net_device *dev);
 static irqreturn_t fec_enet_interrupt(int irq, void * dev_id);
 static void fec_enet_tx(struct net_device *dev);
 static void fec_enet_rx(struct net_device *dev);
 static int fec_enet_close(struct net_device *dev);
-static void set_multicast_list(struct net_device *dev);
 static void fec_restart(struct net_device *dev, int duplex);
 static void fec_stop(struct net_device *dev);
-static void fec_set_mac_address(struct net_device *dev);


 /* MII processing.  We keep this as simple as possible.  Requests are
@@ -241,19 +239,16 @@ static mii_list_t *mii_tail;
 static int mii_queue(struct net_device *dev, int request,
 		     void (*func)(uint, struct net_device *));

-/* Make MII read/write commands for the FEC.
-*/
+/* Make MII read/write commands for the FEC */
 #define mk_mii_read(REG)	(0x60020000 | ((REG & 0x1f) << 18))
 #define mk_mii_write(REG, VAL)	(0x50020000 | ((REG & 0x1f) << 18) | \
 						(VAL & 0xffff))
 #define mk_mii_end	0

-/* Transmitter timeout.
-*/
-#define TX_TIMEOUT (2*HZ)
+/* Transmitter timeout */
+#define TX_TIMEOUT (2 * HZ)

-/* Register definitions for the PHY.
-*/
+/* Register definitions for the PHY */

 #define MII_REG_CR	0	/* Control Register */
 #define MII_REG_SR	1	/* Status Register */
@@ -288,15 +283,11 @@ static int mii_queue(struct net_device *dev, int request,
 static int
 fec_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
 {
-	struct fec_enet_private *fep;
-	volatile fec_t *fecp;
-	volatile cbd_t *bdp;
+	struct fec_enet_private *fep = netdev_priv(dev);
+	struct bufdesc *bdp;
 	unsigned short status;
 	unsigned long flags;

-	fep = netdev_priv(dev);
-	fecp = (volatile fec_t*)dev->base_addr;
-
 	if (!fep->link) {
 		/* Link is down or autonegotiation is in progress. */
 		return 1;
@@ -307,7 +298,7 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	bdp = fep->cur_tx;

 	status = bdp->cbd_sc;
-#ifndef final_version
+
 	if (status & BD_ENET_TX_READY) {
 		/* Ooops.  All transmit buffers are full.  Bail out.
 		 * This should not happen, since dev->tbusy should be set.
@@ -316,21 +307,18 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		spin_unlock_irqrestore(&fep->hw_lock, flags);
 		return 1;
 	}
-#endif

-	/* Clear all of the status flags.
-	 */
+	/* Clear all of the status flags */
 	status &= ~BD_ENET_TX_STATS;

-	/* Set buffer length and buffer pointer.
-	*/
+	/* Set buffer length and buffer pointer */
 	bdp->cbd_bufaddr = __pa(skb->data);
 	bdp->cbd_datlen = skb->len;

 	/*
 	 *	On some FEC implementations data must be aligned on
 	 *	4-byte boundaries. Use bounce buffers to copy data
 	 *	and get it aligned. Ugh.
 	 */
 	if (bdp->cbd_bufaddr & FEC_ALIGNMENT) {
 		unsigned int index;
@@ -339,8 +327,7 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		bdp->cbd_bufaddr = __pa(fep->tx_bounce[index]);
 	}

-	/* Save skb pointer.
-	*/
+	/* Save skb pointer */
 	fep->tx_skbuff[fep->skb_cur] = skb;

 	dev->stats.tx_bytes += skb->len;
@@ -349,13 +336,12 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	/* Push the data cache so the CPM does not get stale memory
 	 * data.
 	 */
-	dma_sync_single(NULL, bdp->cbd_bufaddr,
-			bdp->cbd_datlen, DMA_TO_DEVICE);
+	bdp->cbd_bufaddr = dma_map_single(&dev->dev, skb->data,
+			FEC_ENET_TX_FRSIZE, DMA_TO_DEVICE);

 	/* Send it on its way.  Tell FEC it's ready, interrupt when done,
 	 * it's the last BD of the frame, and to put the CRC on the end.
 	 */
-
 	status |= (BD_ENET_TX_READY | BD_ENET_TX_INTR
 			| BD_ENET_TX_LAST | BD_ENET_TX_TC);
 	bdp->cbd_sc = status;
@@ -363,22 +349,20 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	dev->trans_start = jiffies;

 	/* Trigger transmission start */
-	fecp->fec_x_des_active = 0;
+	writel(0, fep->hwp + FEC_X_DES_ACTIVE);

-	/* If this was the last BD in the ring, start at the beginning again.
-	 */
-	if (status & BD_ENET_TX_WRAP) {
+	/* If this was the last BD in the ring, start at the beginning again. */
+	if (status & BD_ENET_TX_WRAP)
 		bdp = fep->tx_bd_base;
-	} else {
+	else
 		bdp++;
-	}

 	if (bdp == fep->dirty_tx) {
 		fep->tx_full = 1;
 		netif_stop_queue(dev);
 	}

-	fep->cur_tx = (cbd_t *)bdp;
+	fep->cur_tx = bdp;

 	spin_unlock_irqrestore(&fep->hw_lock, flags);

@@ -390,75 +374,33 @@ fec_timeout(struct net_device *dev)
 {
 	struct fec_enet_private *fep = netdev_priv(dev);

-	printk("%s: transmit timed out.\n", dev->name);
 	dev->stats.tx_errors++;
-#ifndef final_version
-	{
-	int	i;
-	cbd_t	*bdp;
-
-	printk("Ring data dump: cur_tx %lx%s, dirty_tx %lx cur_rx: %lx\n",
-	       (unsigned long)fep->cur_tx, fep->tx_full ? " (full)" : "",
-	       (unsigned long)fep->dirty_tx,
-	       (unsigned long)fep->cur_rx);
-
-	bdp = fep->tx_bd_base;
-	printk(" tx: %u buffers\n",  TX_RING_SIZE);
-	for (i = 0 ; i < TX_RING_SIZE; i++) {
-		printk("  %08x: %04x %04x %08x\n",
-		       (uint) bdp,
-		       bdp->cbd_sc,
-		       bdp->cbd_datlen,
-		       (int) bdp->cbd_bufaddr);
-		bdp++;
-	}

-	bdp = fep->rx_bd_base;
-	printk(" rx: %lu buffers\n",  (unsigned long) RX_RING_SIZE);
-	for (i = 0 ; i < RX_RING_SIZE; i++) {
-		printk("  %08x: %04x %04x %08x\n",
-		       (uint) bdp,
-		       bdp->cbd_sc,
-		       bdp->cbd_datlen,
-		       (int) bdp->cbd_bufaddr);
-		bdp++;
-	}
-	}
-#endif
 	fec_restart(dev, fep->full_duplex);
 	netif_wake_queue(dev);
 }

-/* The interrupt handler.
- * This is called from the MPC core interrupt.
- */
 static irqreturn_t
 fec_enet_interrupt(int irq, void * dev_id)
 {
 	struct net_device *dev = dev_id;
-	volatile fec_t *fecp;
+	struct fec_enet_private *fep = netdev_priv(dev);
 	uint int_events;
 	irqreturn_t ret = IRQ_NONE;

-	fecp = (volatile fec_t*)dev->base_addr;
-
-	/* Get the interrupt events that caused us to be here.
-	 */
 	do {
-		int_events = fecp->fec_ievent;
-		fecp->fec_ievent = int_events;
+		int_events = readl(fep->hwp + FEC_IEVENT);
+		writel(int_events, fep->hwp + FEC_IEVENT);

-		/* Handle receive event in its own function.
-		 */
 		if (int_events & FEC_ENET_RXF) {
 			ret = IRQ_HANDLED;
 			fec_enet_rx(dev);
 		}

 		/* Transmit OK, or non-fatal error. Update the buffer
-		   descriptors. FEC handles all errors, we just discover
-		   them as part of the transmit process.
+		 * descriptors. FEC handles all errors, we just discover
+		 * them as part of the transmit process.
 		 */
 		if (int_events & FEC_ENET_TXF) {
 			ret = IRQ_HANDLED;
 			fec_enet_tx(dev);
@@ -479,7 +421,7 @@ static void
 fec_enet_tx(struct net_device *dev)
 {
 	struct fec_enet_private *fep;
-	volatile cbd_t *bdp;
+	struct bufdesc *bdp;
 	unsigned short status;
 	struct sk_buff *skb;

@@ -488,7 +430,11 @@ fec_enet_tx(struct net_device *dev)
 	bdp = fep->dirty_tx;

 	while (((status = bdp->cbd_sc) & BD_ENET_TX_READY) == 0) {
-		if (bdp == fep->cur_tx && fep->tx_full == 0) break;
+		if (bdp == fep->cur_tx && fep->tx_full == 0)
+			break;
+
+		dma_unmap_single(&dev->dev, bdp->cbd_bufaddr, FEC_ENET_TX_FRSIZE, DMA_TO_DEVICE);
+		bdp->cbd_bufaddr = 0;

 		skb = fep->tx_skbuff[fep->skb_dirty];
 		/* Check for errors. */
@@ -510,31 +456,27 @@ fec_enet_tx(struct net_device *dev)
 			dev->stats.tx_packets++;
 		}

-#ifndef final_version
 		if (status & BD_ENET_TX_READY)
 			printk("HEY! Enet xmit interrupt and TX_READY.\n");
-#endif
+
 		/* Deferred means some collisions occurred during transmit,
 		 * but we eventually sent the packet OK.
 		 */
 		if (status & BD_ENET_TX_DEF)
 			dev->stats.collisions++;

-		/* Free the sk buffer associated with this last transmit.
-		 */
+		/* Free the sk buffer associated with this last transmit */
 		dev_kfree_skb_any(skb);
 		fep->tx_skbuff[fep->skb_dirty] = NULL;
 		fep->skb_dirty = (fep->skb_dirty + 1) & TX_RING_MOD_MASK;

-		/* Update pointer to next buffer descriptor to be transmitted.
-		 */
+		/* Update pointer to next buffer descriptor to be transmitted */
 		if (status & BD_ENET_TX_WRAP)
 			bdp = fep->tx_bd_base;
 		else
 			bdp++;

-		/* Since we have freed up a buffer, the ring is no longer
-		 * full.
+		/* Since we have freed up a buffer, the ring is no longer full
 		 */
 		if (fep->tx_full) {
 			fep->tx_full = 0;
@@ -542,7 +484,7 @@ fec_enet_tx(struct net_device *dev)
 			netif_wake_queue(dev);
 		}
 	}
-	fep->dirty_tx = (cbd_t *)bdp;
+	fep->dirty_tx = bdp;
 	spin_unlock_irq(&fep->hw_lock);
 }

@@ -555,9 +497,8 @@ fec_enet_tx(struct net_device *dev)
 static void
 fec_enet_rx(struct net_device *dev)
 {
-	struct fec_enet_private *fep;
-	volatile fec_t *fecp;
-	volatile cbd_t *bdp;
+	struct fec_enet_private *fep = netdev_priv(dev);
+	struct bufdesc *bdp;
 	unsigned short status;
 	struct sk_buff *skb;
 	ushort pkt_len;
@@ -567,9 +508,6 @@ fec_enet_rx(struct net_device *dev)
 	flush_cache_all();
 #endif

-	fep = netdev_priv(dev);
-	fecp = (volatile fec_t*)dev->base_addr;
-
 	spin_lock_irq(&fep->hw_lock);

 	/* First, grab all of the stats for the incoming packet.
@@ -577,143 +515,121 @@ fec_enet_rx(struct net_device *dev)
 	 */
 	bdp = fep->cur_rx;

-while (!((status = bdp->cbd_sc) & BD_ENET_RX_EMPTY)) {
+	while (!((status = bdp->cbd_sc) & BD_ENET_RX_EMPTY)) {

-#ifndef final_version
-	/* Since we have allocated space to hold a complete frame,
-	 * the last indicator should be set.
-	 */
-	if ((status & BD_ENET_RX_LAST) == 0)
-		printk("FEC ENET: rcv is not +last\n");
-#endif
+		/* Since we have allocated space to hold a complete frame,
+		 * the last indicator should be set.
+		 */
+		if ((status & BD_ENET_RX_LAST) == 0)
+			printk("FEC ENET: rcv is not +last\n");

-	if (!fep->opened)
-		goto rx_processing_done;
+		if (!fep->opened)
+			goto rx_processing_done;

-	/* Check for errors. */
-	if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH | BD_ENET_RX_NO |
-		   BD_ENET_RX_CR | BD_ENET_RX_OV)) {
-		dev->stats.rx_errors++;
-		if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH)) {
-			/* Frame too long or too short. */
-			dev->stats.rx_length_errors++;
+		/* Check for errors. */
+		if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH | BD_ENET_RX_NO |
+			   BD_ENET_RX_CR | BD_ENET_RX_OV)) {
+			dev->stats.rx_errors++;
+			if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH)) {
+				/* Frame too long or too short. */
+				dev->stats.rx_length_errors++;
+			}
+			if (status & BD_ENET_RX_NO)	/* Frame alignment */
+				dev->stats.rx_frame_errors++;
+			if (status & BD_ENET_RX_CR)	/* CRC Error */
+				dev->stats.rx_crc_errors++;
+			if (status & BD_ENET_RX_OV)	/* FIFO overrun */
+				dev->stats.rx_fifo_errors++;
 		}
-		if (status & BD_ENET_RX_NO)	/* Frame alignment */
-			dev->stats.rx_frame_errors++;
-		if (status & BD_ENET_RX_CR)	/* CRC Error */
-			dev->stats.rx_crc_errors++;
-		if (status & BD_ENET_RX_OV)	/* FIFO overrun */
-			dev->stats.rx_fifo_errors++;
-	}
+
+		/* Report late collisions as a frame error.
+		 * On this error, the BD is closed, but we don't know what we
+		 * have in the buffer. So, just drop this frame on the floor.
+		 */
+		if (status & BD_ENET_RX_CL) {
+			dev->stats.rx_errors++;
+			dev->stats.rx_frame_errors++;
+			goto rx_processing_done;
+		}

-	/* Report late collisions as a frame error.
-	 * On this error, the BD is closed, but we don't know what we
-	 * have in the buffer. So, just drop this frame on the floor.
-	 */
-	if (status & BD_ENET_RX_CL) {
-		dev->stats.rx_errors++;
-		dev->stats.rx_frame_errors++;
-		goto rx_processing_done;
-	}
+		/* Process the incoming frame. */
+		dev->stats.rx_packets++;
+		pkt_len = bdp->cbd_datlen;
+		dev->stats.rx_bytes += pkt_len;
+		data = (__u8*)__va(bdp->cbd_bufaddr);

-	/* Process the incoming frame.
-	 */
-	dev->stats.rx_packets++;
-	pkt_len = bdp->cbd_datlen;
-	dev->stats.rx_bytes += pkt_len;
-	data = (__u8*)__va(bdp->cbd_bufaddr);
-
-	dma_sync_single(NULL, (unsigned long)__pa(data),
-			pkt_len - 4, DMA_FROM_DEVICE);
-
-	/* This does 16 byte alignment, exactly what we need.
-	 * The packet length includes FCS, but we don't want to
-	 * include that when passing upstream as it messes up
-	 * bridging applications.
-	 */
-	skb = dev_alloc_skb(pkt_len-4);
+		dma_unmap_single(NULL, bdp->cbd_bufaddr, bdp->cbd_datlen,
+				DMA_FROM_DEVICE);

-	if (skb == NULL) {
-		printk("%s: Memory squeeze, dropping packet.\n", dev->name);
-		dev->stats.rx_dropped++;
-	} else {
-		skb_put(skb,pkt_len-4);	/* Make room */
-		skb_copy_to_linear_data(skb, data, pkt_len-4);
-		skb->protocol=eth_type_trans(skb,dev);
-		netif_rx(skb);
-	}
-  rx_processing_done:
+		/* This does 16 byte alignment, exactly what we need.
+		 * The packet length includes FCS, but we don't want to
+		 * include that when passing upstream as it messes up
+		 * bridging applications.
+		 */
+		skb = dev_alloc_skb(pkt_len - 4 + NET_IP_ALIGN);

-	/* Clear the status flags for this buffer.
-	 */
-	status &= ~BD_ENET_RX_STATS;
+		if (unlikely(!skb)) {
+			printk("%s: Memory squeeze, dropping packet.\n",
+					dev->name);
+			dev->stats.rx_dropped++;
+		} else {
+			skb_reserve(skb, NET_IP_ALIGN);
+			skb_put(skb, pkt_len - 4);	/* Make room */
+			skb_copy_to_linear_data(skb, data, pkt_len - 4);
+			skb->protocol = eth_type_trans(skb, dev);
+			netif_rx(skb);
+		}

-	/* Mark the buffer empty.
-	 */
-	status |= BD_ENET_RX_EMPTY;
-	bdp->cbd_sc = status;
+		bdp->cbd_bufaddr = dma_map_single(NULL, data, bdp->cbd_datlen,
+			DMA_FROM_DEVICE);
+rx_processing_done:
+		/* Clear the status flags for this buffer */
+		status &= ~BD_ENET_RX_STATS;

-	/* Update BD pointer to next entry.
-	 */
-	if (status & BD_ENET_RX_WRAP)
-		bdp = fep->rx_bd_base;
-	else
-		bdp++;
+		/* Mark the buffer empty */
+		status |= BD_ENET_RX_EMPTY;
+		bdp->cbd_sc = status;

-#if 1
-	/* Doing this here will keep the FEC running while we process
-	 * incoming frames.  On a heavily loaded network, we should be
-	 * able to keep up at the expense of system resources.
+		/* Update BD pointer to next entry */
+		if (status & BD_ENET_RX_WRAP)
+			bdp = fep->rx_bd_base;
+		else
667 */ 597 bdp++;
668 fecp->fec_r_des_active = 0; 598 /* Doing this here will keep the FEC running while we process
669#endif 599 * incoming frames. On a heavily loaded network, we should be
670 } /* while (!((status = bdp->cbd_sc) & BD_ENET_RX_EMPTY)) */ 600 * able to keep up at the expense of system resources.
671 fep->cur_rx = (cbd_t *)bdp; 601 */
672 602 writel(0, fep->hwp + FEC_R_DES_ACTIVE);
673#if 0 603 }
674 /* Doing this here will allow us to process all frames in the 604 fep->cur_rx = bdp;
675 * ring before the FEC is allowed to put more there. On a heavily
676 * loaded network, some frames may be lost. Unfortunately, this
677 * increases the interrupt overhead since we can potentially work
678 * our way back to the interrupt return only to come right back
679 * here.
680 */
681 fecp->fec_r_des_active = 0;
682#endif
683 605
684 spin_unlock_irq(&fep->hw_lock); 606 spin_unlock_irq(&fep->hw_lock);
685} 607}
686 608
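For readers following the receive-path rewrite above: the new code allocates the upstream skb with NET_IP_ALIGN bytes of headroom so the IP header lands on an aligned boundary, strips the 4-byte FCS, and copies the frame out of the DMA buffer before handing it to the stack. A minimal sketch of that pattern is shown below; fec_rx_deliver() is a hypothetical helper used only for illustration, not a function added by this patch.

#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>

/* Deliver one received frame; "data" points at the DMA buffer and
 * "pkt_len" is the length from the buffer descriptor (it includes the FCS).
 */
static void fec_rx_deliver(struct net_device *dev, const u8 *data, u16 pkt_len)
{
	struct sk_buff *skb;

	skb = dev_alloc_skb(pkt_len - 4 + NET_IP_ALIGN);
	if (unlikely(!skb)) {
		dev->stats.rx_dropped++;
		return;
	}

	skb_reserve(skb, NET_IP_ALIGN);		/* align the IP header */
	skb_put(skb, pkt_len - 4);		/* room for payload, minus FCS */
	skb_copy_to_linear_data(skb, data, pkt_len - 4);
	skb->protocol = eth_type_trans(skb, dev);
	netif_rx(skb);				/* the driver calls this from IRQ context */
}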
687
688/* called from interrupt context */ 609/* called from interrupt context */
689static void 610static void
690fec_enet_mii(struct net_device *dev) 611fec_enet_mii(struct net_device *dev)
691{ 612{
692 struct fec_enet_private *fep; 613 struct fec_enet_private *fep;
693 volatile fec_t *ep;
694 mii_list_t *mip; 614 mii_list_t *mip;
695 uint mii_reg;
696 615
697 fep = netdev_priv(dev); 616 fep = netdev_priv(dev);
698 spin_lock_irq(&fep->mii_lock); 617 spin_lock_irq(&fep->mii_lock);
699 618
700 ep = fep->hwp;
701 mii_reg = ep->fec_mii_data;
702
703 if ((mip = mii_head) == NULL) { 619 if ((mip = mii_head) == NULL) {
704 printk("MII and no head!\n"); 620 printk("MII and no head!\n");
705 goto unlock; 621 goto unlock;
706 } 622 }
707 623
708 if (mip->mii_func != NULL) 624 if (mip->mii_func != NULL)
709 (*(mip->mii_func))(mii_reg, dev); 625 (*(mip->mii_func))(readl(fep->hwp + FEC_MII_DATA), dev);
710 626
711 mii_head = mip->mii_next; 627 mii_head = mip->mii_next;
712 mip->mii_next = mii_free; 628 mip->mii_next = mii_free;
713 mii_free = mip; 629 mii_free = mip;
714 630
715 if ((mip = mii_head) != NULL) 631 if ((mip = mii_head) != NULL)
716 ep->fec_mii_data = mip->mii_regval; 632 writel(mip->mii_regval, fep->hwp + FEC_MII_DATA);
717 633
718unlock: 634unlock:
719 spin_unlock_irq(&fep->mii_lock); 635 spin_unlock_irq(&fep->mii_lock);
@@ -727,8 +643,7 @@ mii_queue(struct net_device *dev, int regval, void (*func)(uint, struct net_devi
727 mii_list_t *mip; 643 mii_list_t *mip;
728 int retval; 644 int retval;
729 645
730 /* Add PHY address to register command. 646 /* Add PHY address to register command */
731 */
732 fep = netdev_priv(dev); 647 fep = netdev_priv(dev);
733 spin_lock_irqsave(&fep->mii_lock, flags); 648 spin_lock_irqsave(&fep->mii_lock, flags);
734 649
@@ -745,7 +660,7 @@ mii_queue(struct net_device *dev, int regval, void (*func)(uint, struct net_devi
745 mii_tail = mip; 660 mii_tail = mip;
746 } else { 661 } else {
747 mii_head = mii_tail = mip; 662 mii_head = mii_tail = mip;
748 fep->hwp->fec_mii_data = regval; 663 writel(regval, fep->hwp + FEC_MII_DATA);
749 } 664 }
750 } else { 665 } else {
751 retval = 1; 666 retval = 1;
@@ -1246,11 +1161,8 @@ static void __inline__ fec_phy_ack_intr(void)
1246static void __inline__ fec_get_mac(struct net_device *dev) 1161static void __inline__ fec_get_mac(struct net_device *dev)
1247{ 1162{
1248 struct fec_enet_private *fep = netdev_priv(dev); 1163 struct fec_enet_private *fep = netdev_priv(dev);
1249 volatile fec_t *fecp;
1250 unsigned char *iap, tmpaddr[ETH_ALEN]; 1164 unsigned char *iap, tmpaddr[ETH_ALEN];
1251 1165
1252 fecp = fep->hwp;
1253
1254 if (FEC_FLASHMAC) { 1166 if (FEC_FLASHMAC) {
1255 /* 1167 /*
1256 * Get MAC address from FLASH. 1168 * Get MAC address from FLASH.
@@ -1264,8 +1176,8 @@ static void __inline__ fec_get_mac(struct net_device *dev)
1264 (iap[3] == 0xff) && (iap[4] == 0xff) && (iap[5] == 0xff)) 1176 (iap[3] == 0xff) && (iap[4] == 0xff) && (iap[5] == 0xff))
1265 iap = fec_mac_default; 1177 iap = fec_mac_default;
1266 } else { 1178 } else {
1267 *((unsigned long *) &tmpaddr[0]) = fecp->fec_addr_low; 1179 *((unsigned long *) &tmpaddr[0]) = readl(fep->hwp + FEC_ADDR_LOW);
1268 *((unsigned short *) &tmpaddr[4]) = (fecp->fec_addr_high >> 16); 1180 *((unsigned short *) &tmpaddr[4]) = (readl(fep->hwp + FEC_ADDR_HIGH) >> 16);
1269 iap = &tmpaddr[0]; 1181 iap = &tmpaddr[0];
1270 } 1182 }
1271 1183
@@ -1375,11 +1287,6 @@ static void mii_relink(struct work_struct *work)
1375 fec_restart(dev, duplex); 1287 fec_restart(dev, duplex);
1376 } else 1288 } else
1377 fec_stop(dev); 1289 fec_stop(dev);
1378
1379#if 0
1380 enable_irq(fep->mii_irq);
1381#endif
1382
1383} 1290}
1384 1291
1385/* mii_queue_relink is called in interrupt context from mii_link_interrupt */ 1292/* mii_queue_relink is called in interrupt context from mii_link_interrupt */
@@ -1388,12 +1295,12 @@ static void mii_queue_relink(uint mii_reg, struct net_device *dev)
1388 struct fec_enet_private *fep = netdev_priv(dev); 1295 struct fec_enet_private *fep = netdev_priv(dev);
1389 1296
1390 /* 1297 /*
1391 ** We cannot queue phy_task twice in the workqueue. It 1298 * We cannot queue phy_task twice in the workqueue. It
1392 ** would cause an endless loop in the workqueue. 1299 * would cause an endless loop in the workqueue.
1393 ** Fortunately, if the last mii_relink entry has not yet been 1300 * Fortunately, if the last mii_relink entry has not yet been
1394 ** executed now, it will do the job for the current interrupt, 1301 * executed now, it will do the job for the current interrupt,
1395 ** which is just what we want. 1302 * which is just what we want.
1396 */ 1303 */
1397 if (fep->mii_phy_task_queued) 1304 if (fep->mii_phy_task_queued)
1398 return; 1305 return;
1399 1306
@@ -1424,8 +1331,7 @@ phy_cmd_t const phy_cmd_config[] = {
1424 { mk_mii_end, } 1331 { mk_mii_end, }
1425 }; 1332 };
1426 1333
1427/* Read remainder of PHY ID. 1334/* Read remainder of PHY ID. */
1428*/
1429static void 1335static void
1430mii_discover_phy3(uint mii_reg, struct net_device *dev) 1336mii_discover_phy3(uint mii_reg, struct net_device *dev)
1431{ 1337{
@@ -1457,17 +1363,14 @@ static void
1457mii_discover_phy(uint mii_reg, struct net_device *dev) 1363mii_discover_phy(uint mii_reg, struct net_device *dev)
1458{ 1364{
1459 struct fec_enet_private *fep; 1365 struct fec_enet_private *fep;
1460 volatile fec_t *fecp;
1461 uint phytype; 1366 uint phytype;
1462 1367
1463 fep = netdev_priv(dev); 1368 fep = netdev_priv(dev);
1464 fecp = fep->hwp;
1465 1369
1466 if (fep->phy_addr < 32) { 1370 if (fep->phy_addr < 32) {
1467 if ((phytype = (mii_reg & 0xffff)) != 0xffff && phytype != 0) { 1371 if ((phytype = (mii_reg & 0xffff)) != 0xffff && phytype != 0) {
1468 1372
1469 /* Got first part of ID, now get remainder. 1373 /* Got first part of ID, now get remainder */
1470 */
1471 fep->phy_id = phytype << 16; 1374 fep->phy_id = phytype << 16;
1472 mii_queue(dev, mk_mii_read(MII_REG_PHYIR2), 1375 mii_queue(dev, mk_mii_read(MII_REG_PHYIR2),
1473 mii_discover_phy3); 1376 mii_discover_phy3);
@@ -1479,15 +1382,15 @@ mii_discover_phy(uint mii_reg, struct net_device *dev)
1479 } else { 1382 } else {
1480 printk("FEC: No PHY device found.\n"); 1383 printk("FEC: No PHY device found.\n");
1481 /* Disable external MII interface */ 1384 /* Disable external MII interface */
1482 fecp->fec_mii_speed = fep->phy_speed = 0; 1385 writel(0, fep->hwp + FEC_MII_SPEED);
1386 fep->phy_speed = 0;
1483#ifdef HAVE_mii_link_interrupt 1387#ifdef HAVE_mii_link_interrupt
1484 fec_disable_phy_intr(); 1388 fec_disable_phy_intr();
1485#endif 1389#endif
1486 } 1390 }
1487} 1391}
1488 1392
1489/* This interrupt occurs when the PHY detects a link change. 1393/* This interrupt occurs when the PHY detects a link change */
1490*/
1491#ifdef HAVE_mii_link_interrupt 1394#ifdef HAVE_mii_link_interrupt
1492static irqreturn_t 1395static irqreturn_t
1493mii_link_interrupt(int irq, void * dev_id) 1396mii_link_interrupt(int irq, void * dev_id)
@@ -1497,10 +1400,6 @@ mii_link_interrupt(int irq, void * dev_id)
1497 1400
1498 fec_phy_ack_intr(); 1401 fec_phy_ack_intr();
1499 1402
1500#if 0
1501 disable_irq(fep->mii_irq); /* disable now, enable later */
1502#endif
1503
1504 mii_do_cmd(dev, fep->phy->ack_int); 1403 mii_do_cmd(dev, fep->phy->ack_int);
1505 mii_do_cmd(dev, phy_cmd_relink); /* restart and display status */ 1404 mii_do_cmd(dev, phy_cmd_relink); /* restart and display status */
1506 1405
@@ -1508,19 +1407,91 @@ mii_link_interrupt(int irq, void * dev_id)
1508} 1407}
1509#endif 1408#endif
1510 1409
1410static void fec_enet_free_buffers(struct net_device *dev)
1411{
1412 struct fec_enet_private *fep = netdev_priv(dev);
1413 int i;
1414 struct sk_buff *skb;
1415 struct bufdesc *bdp;
1416
1417 bdp = fep->rx_bd_base;
1418 for (i = 0; i < RX_RING_SIZE; i++) {
1419 skb = fep->rx_skbuff[i];
1420
1421 if (bdp->cbd_bufaddr)
1422 dma_unmap_single(&dev->dev, bdp->cbd_bufaddr,
1423 FEC_ENET_RX_FRSIZE, DMA_FROM_DEVICE);
1424 if (skb)
1425 dev_kfree_skb(skb);
1426 bdp++;
1427 }
1428
1429 bdp = fep->tx_bd_base;
1430 for (i = 0; i < TX_RING_SIZE; i++)
1431 kfree(fep->tx_bounce[i]);
1432}
1433
1434static int fec_enet_alloc_buffers(struct net_device *dev)
1435{
1436 struct fec_enet_private *fep = netdev_priv(dev);
1437 int i;
1438 struct sk_buff *skb;
1439 struct bufdesc *bdp;
1440
1441 bdp = fep->rx_bd_base;
1442 for (i = 0; i < RX_RING_SIZE; i++) {
1443 skb = dev_alloc_skb(FEC_ENET_RX_FRSIZE);
1444 if (!skb) {
1445 fec_enet_free_buffers(dev);
1446 return -ENOMEM;
1447 }
1448 fep->rx_skbuff[i] = skb;
1449
1450 bdp->cbd_bufaddr = dma_map_single(&dev->dev, skb->data,
1451 FEC_ENET_RX_FRSIZE, DMA_FROM_DEVICE);
1452 bdp->cbd_sc = BD_ENET_RX_EMPTY;
1453 bdp++;
1454 }
1455
1456 /* Set the last buffer to wrap. */
1457 bdp--;
1458 bdp->cbd_sc |= BD_SC_WRAP;
1459
1460 bdp = fep->tx_bd_base;
1461 for (i = 0; i < TX_RING_SIZE; i++) {
1462 fep->tx_bounce[i] = kmalloc(FEC_ENET_TX_FRSIZE, GFP_KERNEL);
1463
1464 bdp->cbd_sc = 0;
1465 bdp->cbd_bufaddr = 0;
1466 bdp++;
1467 }
1468
1469 /* Set the last buffer to wrap. */
1470 bdp--;
1471 bdp->cbd_sc |= BD_SC_WRAP;
1472
1473 return 0;
1474}
1475
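The two helpers just added move RX buffer management out of init and onto the open/close path: every receive skb is allocated and DMA-mapped when the interface is opened, and unmapped and freed again when it is closed. A condensed sketch of that pairing follows; fec_rx_ring_fill()/fec_rx_ring_drain() are hypothetical names that compress what fec_enet_alloc_buffers()/fec_enet_free_buffers() do for the RX ring, and FEC_ENET_RX_FRSIZE, struct bufdesc and the BD_* flags are the driver's own definitions.

/* Fill an RX ring of "size" descriptors, mapping one skb per descriptor. */
static int fec_rx_ring_fill(struct net_device *dev, struct bufdesc *bdp,
			    struct sk_buff **ring, int size)
{
	int i;

	for (i = 0; i < size; i++, bdp++) {
		struct sk_buff *skb = dev_alloc_skb(FEC_ENET_RX_FRSIZE);

		if (!skb)
			return -ENOMEM;	/* caller drains whatever was filled */
		ring[i] = skb;
		bdp->cbd_bufaddr = dma_map_single(&dev->dev, skb->data,
					FEC_ENET_RX_FRSIZE, DMA_FROM_DEVICE);
		bdp->cbd_sc = BD_ENET_RX_EMPTY;
	}
	(bdp - 1)->cbd_sc |= BD_SC_WRAP;	/* last descriptor wraps the ring */
	return 0;
}

/* Undo fec_rx_ring_fill(): unmap and free every buffer that was set up. */
static void fec_rx_ring_drain(struct net_device *dev, struct bufdesc *bdp,
			      struct sk_buff **ring, int size)
{
	int i;

	for (i = 0; i < size; i++, bdp++) {
		if (bdp->cbd_bufaddr)
			dma_unmap_single(&dev->dev, bdp->cbd_bufaddr,
					 FEC_ENET_RX_FRSIZE, DMA_FROM_DEVICE);
		if (ring[i])
			dev_kfree_skb(ring[i]);
	}
}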
1511static int 1476static int
1512fec_enet_open(struct net_device *dev) 1477fec_enet_open(struct net_device *dev)
1513{ 1478{
1514 struct fec_enet_private *fep = netdev_priv(dev); 1479 struct fec_enet_private *fep = netdev_priv(dev);
1480 int ret;
1515 1481
1516 /* I should reset the ring buffers here, but I don't yet know 1482 /* I should reset the ring buffers here, but I don't yet know
1517 * a simple way to do that. 1483 * a simple way to do that.
1518 */ 1484 */
1519 fec_set_mac_address(dev); 1485
1486 ret = fec_enet_alloc_buffers(dev);
1487 if (ret)
1488 return ret;
1520 1489
1521 fep->sequence_done = 0; 1490 fep->sequence_done = 0;
1522 fep->link = 0; 1491 fep->link = 0;
1523 1492
1493 fec_restart(dev, 1);
1494
1524 if (fep->phy) { 1495 if (fep->phy) {
1525 mii_do_cmd(dev, fep->phy->ack_int); 1496 mii_do_cmd(dev, fep->phy->ack_int);
1526 mii_do_cmd(dev, fep->phy->config); 1497 mii_do_cmd(dev, fep->phy->config);
@@ -1537,21 +1508,17 @@ fec_enet_open(struct net_device *dev)
1537 schedule(); 1508 schedule();
1538 1509
1539 mii_do_cmd(dev, fep->phy->startup); 1510 mii_do_cmd(dev, fep->phy->startup);
1540
1541 /* Set the initial link state to true. A lot of hardware
1542 * based on this device does not implement a PHY interrupt,
1543 * so we are never notified of link change.
1544 */
1545 fep->link = 1;
1546 } else {
1547 fep->link = 1; /* lets just try it and see */
1548 /* no phy, go full duplex, it's most likely a hub chip */
1549 fec_restart(dev, 1);
1550 } 1511 }
1551 1512
1513 /* Set the initial link state to true. A lot of hardware
1514 * based on this device does not implement a PHY interrupt,
1515 * so we are never notified of link change.
1516 */
1517 fep->link = 1;
1518
1552 netif_start_queue(dev); 1519 netif_start_queue(dev);
1553 fep->opened = 1; 1520 fep->opened = 1;
1554 return 0; /* Success */ 1521 return 0;
1555} 1522}
1556 1523
1557static int 1524static int
@@ -1559,12 +1526,13 @@ fec_enet_close(struct net_device *dev)
1559{ 1526{
1560 struct fec_enet_private *fep = netdev_priv(dev); 1527 struct fec_enet_private *fep = netdev_priv(dev);
1561 1528
1562 /* Don't know what to do yet. 1529 /* Don't know what to do yet. */
1563 */
1564 fep->opened = 0; 1530 fep->opened = 0;
1565 netif_stop_queue(dev); 1531 netif_stop_queue(dev);
1566 fec_stop(dev); 1532 fec_stop(dev);
1567 1533
1534 fec_enet_free_buffers(dev);
1535
1568 return 0; 1536 return 0;
1569} 1537}
1570 1538
@@ -1583,87 +1551,102 @@ fec_enet_close(struct net_device *dev)
1583 1551
1584static void set_multicast_list(struct net_device *dev) 1552static void set_multicast_list(struct net_device *dev)
1585{ 1553{
1586 struct fec_enet_private *fep; 1554 struct fec_enet_private *fep = netdev_priv(dev);
1587 volatile fec_t *ep;
1588 struct dev_mc_list *dmi; 1555 struct dev_mc_list *dmi;
1589 unsigned int i, j, bit, data, crc; 1556 unsigned int i, j, bit, data, crc, tmp;
1590 unsigned char hash; 1557 unsigned char hash;
1591 1558
1592 fep = netdev_priv(dev); 1559 if (dev->flags & IFF_PROMISC) {
1593 ep = fep->hwp; 1560 tmp = readl(fep->hwp + FEC_R_CNTRL);
1561 tmp |= 0x8;
1562 writel(tmp, fep->hwp + FEC_R_CNTRL);
1563 return;
1564 }
1594 1565
1595 if (dev->flags&IFF_PROMISC) { 1566 tmp = readl(fep->hwp + FEC_R_CNTRL);
1596 ep->fec_r_cntrl |= 0x0008; 1567 tmp &= ~0x8;
1597 } else { 1568 writel(tmp, fep->hwp + FEC_R_CNTRL);
1569
1570 if (dev->flags & IFF_ALLMULTI) {
1571 /* Catch all multicast addresses, so set the
1572 * filter to all 1's
1573 */
1574 writel(0xffffffff, fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
1575 writel(0xffffffff, fep->hwp + FEC_GRP_HASH_TABLE_LOW);
1598 1576
1599 ep->fec_r_cntrl &= ~0x0008; 1577 return;
1578 }
1600 1579
1601 if (dev->flags & IFF_ALLMULTI) { 1580 /* Clear filter and add the addresses in hash register
1602 /* Catch all multicast addresses, so set the 1581 */
1603 * filter to all 1's. 1582 writel(0, fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
1604 */ 1583 writel(0, fep->hwp + FEC_GRP_HASH_TABLE_LOW);
1605 ep->fec_grp_hash_table_high = 0xffffffff; 1584
1606 ep->fec_grp_hash_table_low = 0xffffffff; 1585 dmi = dev->mc_list;
1607 } else { 1586
1608 /* Clear filter and add the addresses in hash register. 1587 for (j = 0; j < dev->mc_count; j++, dmi = dmi->next) {
1609 */ 1588 /* Only support group multicast for now */
1610 ep->fec_grp_hash_table_high = 0; 1589 if (!(dmi->dmi_addr[0] & 1))
1611 ep->fec_grp_hash_table_low = 0; 1590 continue;
1612 1591
1613 dmi = dev->mc_list; 1592 /* calculate crc32 value of mac address */
1614 1593 crc = 0xffffffff;
1615 for (j = 0; j < dev->mc_count; j++, dmi = dmi->next) 1594
1616 { 1595 for (i = 0; i < dmi->dmi_addrlen; i++) {
1617 /* Only support group multicast for now. 1596 data = dmi->dmi_addr[i];
1618 */ 1597 for (bit = 0; bit < 8; bit++, data >>= 1) {
1619 if (!(dmi->dmi_addr[0] & 1)) 1598 crc = (crc >> 1) ^
1620 continue; 1599 (((crc ^ data) & 1) ? CRC32_POLY : 0);
1621
1622 /* calculate crc32 value of mac address
1623 */
1624 crc = 0xffffffff;
1625
1626 for (i = 0; i < dmi->dmi_addrlen; i++)
1627 {
1628 data = dmi->dmi_addr[i];
1629 for (bit = 0; bit < 8; bit++, data >>= 1)
1630 {
1631 crc = (crc >> 1) ^
1632 (((crc ^ data) & 1) ? CRC32_POLY : 0);
1633 }
1634 }
1635
1636 /* only upper 6 bits (HASH_BITS) are used
1637 which point to specific bit in he hash registers
1638 */
1639 hash = (crc >> (32 - HASH_BITS)) & 0x3f;
1640
1641 if (hash > 31)
1642 ep->fec_grp_hash_table_high |= 1 << (hash - 32);
1643 else
1644 ep->fec_grp_hash_table_low |= 1 << hash;
1645 } 1600 }
1646 } 1601 }
1602
1603 /* only upper 6 bits (HASH_BITS) are used
 1604	 * which point to a specific bit in the hash registers
1605 */
1606 hash = (crc >> (32 - HASH_BITS)) & 0x3f;
1607
1608 if (hash > 31) {
1609 tmp = readl(fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
1610 tmp |= 1 << (hash - 32);
1611 writel(tmp, fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
1612 } else {
1613 tmp = readl(fep->hwp + FEC_GRP_HASH_TABLE_LOW);
1614 tmp |= 1 << hash;
1615 writel(tmp, fep->hwp + FEC_GRP_HASH_TABLE_LOW);
1616 }
1647 } 1617 }
1648} 1618}
1649 1619
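The multicast filtering logic itself is unchanged by the conversion above: a bit-serial CRC-32 is computed over each group address, and the top HASH_BITS (6) bits of the result select one bit in the 64-bit group hash table. A standalone sketch of that computation is below; fec_mc_hash() is a hypothetical helper, and CRC32_POLY and HASH_BITS are the driver's existing constants.

/* Compute the 6-bit FEC multicast hash for one hardware address. */
static unsigned char fec_mc_hash(const unsigned char *addr, int len)
{
	unsigned int crc = 0xffffffff;
	unsigned int data, bit;
	int i;

	for (i = 0; i < len; i++) {
		data = addr[i];
		for (bit = 0; bit < 8; bit++, data >>= 1)
			crc = (crc >> 1) ^ (((crc ^ data) & 1) ? CRC32_POLY : 0);
	}

	/* Only the upper HASH_BITS bits index the two 32-bit hash registers:
	 * hash > 31 selects GRP_HASH_TABLE_HIGH, otherwise GRP_HASH_TABLE_LOW.
	 */
	return (crc >> (32 - HASH_BITS)) & 0x3f;
}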
1650/* Set a MAC change in hardware. 1620/* Set a MAC change in hardware. */
1651 */ 1621static int
1652static void 1622fec_set_mac_address(struct net_device *dev, void *p)
1653fec_set_mac_address(struct net_device *dev)
1654{ 1623{
1655 volatile fec_t *fecp; 1624 struct fec_enet_private *fep = netdev_priv(dev);
1625 struct sockaddr *addr = p;
1656 1626
1657 fecp = ((struct fec_enet_private *)netdev_priv(dev))->hwp; 1627 if (!is_valid_ether_addr(addr->sa_data))
1628 return -EADDRNOTAVAIL;
1658 1629
1659 /* Set station address. */ 1630 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
1660 fecp->fec_addr_low = dev->dev_addr[3] | (dev->dev_addr[2] << 8) |
1661 (dev->dev_addr[1] << 16) | (dev->dev_addr[0] << 24);
1662 fecp->fec_addr_high = (dev->dev_addr[5] << 16) |
1663 (dev->dev_addr[4] << 24);
1664 1631
1632 writel(dev->dev_addr[3] | (dev->dev_addr[2] << 8) |
1633 (dev->dev_addr[1] << 16) | (dev->dev_addr[0] << 24),
1634 fep->hwp + FEC_ADDR_LOW);
1635 writel((dev->dev_addr[5] << 16) | (dev->dev_addr[4] << 24),
 1636		fep->hwp + FEC_ADDR_HIGH);
1637 return 0;
1665} 1638}
1666 1639
1640static const struct net_device_ops fec_netdev_ops = {
1641 .ndo_open = fec_enet_open,
1642 .ndo_stop = fec_enet_close,
1643 .ndo_start_xmit = fec_enet_start_xmit,
1644 .ndo_set_multicast_list = set_multicast_list,
1645 .ndo_validate_addr = eth_validate_addr,
1646 .ndo_tx_timeout = fec_timeout,
1647 .ndo_set_mac_address = fec_set_mac_address,
1648};
1649
1667 /* 1650 /*
1668 * XXX: We need to clean up on failure exits here. 1651 * XXX: We need to clean up on failure exits here.
1669 * 1652 *
@@ -1672,17 +1655,13 @@ fec_set_mac_address(struct net_device *dev)
1672int __init fec_enet_init(struct net_device *dev, int index) 1655int __init fec_enet_init(struct net_device *dev, int index)
1673{ 1656{
1674 struct fec_enet_private *fep = netdev_priv(dev); 1657 struct fec_enet_private *fep = netdev_priv(dev);
1675 unsigned long mem_addr; 1658 struct bufdesc *cbd_base;
1676 volatile cbd_t *bdp; 1659 int i;
1677 cbd_t *cbd_base;
1678 volatile fec_t *fecp;
1679 int i, j;
1680 1660
1681 /* Allocate memory for buffer descriptors. 1661 /* Allocate memory for buffer descriptors. */
1682 */ 1662 cbd_base = dma_alloc_coherent(NULL, PAGE_SIZE, &fep->bd_dma,
1683 mem_addr = (unsigned long)dma_alloc_coherent(NULL, PAGE_SIZE, 1663 GFP_KERNEL);
1684 &fep->bd_dma, GFP_KERNEL); 1664 if (!cbd_base) {
1685 if (mem_addr == 0) {
1686 printk("FEC: allocate descriptor memory failed?\n"); 1665 printk("FEC: allocate descriptor memory failed?\n");
1687 return -ENOMEM; 1666 return -ENOMEM;
1688 } 1667 }
@@ -1690,146 +1669,47 @@ int __init fec_enet_init(struct net_device *dev, int index)
1690 spin_lock_init(&fep->hw_lock); 1669 spin_lock_init(&fep->hw_lock);
1691 spin_lock_init(&fep->mii_lock); 1670 spin_lock_init(&fep->mii_lock);
1692 1671
1693 /* Create an Ethernet device instance.
1694 */
1695 fecp = (volatile fec_t *)dev->base_addr;
1696
1697 fep->index = index; 1672 fep->index = index;
1698 fep->hwp = fecp; 1673 fep->hwp = (void __iomem *)dev->base_addr;
1699 fep->netdev = dev; 1674 fep->netdev = dev;
1700 1675
1701 /* Whack a reset. We should wait for this.
1702 */
1703 fecp->fec_ecntrl = 1;
1704 udelay(10);
1705
1706 /* Set the Ethernet address */ 1676 /* Set the Ethernet address */
1707#ifdef CONFIG_M5272 1677#ifdef CONFIG_M5272
1708 fec_get_mac(dev); 1678 fec_get_mac(dev);
1709#else 1679#else
1710 { 1680 {
1711 unsigned long l; 1681 unsigned long l;
1712 l = fecp->fec_addr_low; 1682 l = readl(fep->hwp + FEC_ADDR_LOW);
1713 dev->dev_addr[0] = (unsigned char)((l & 0xFF000000) >> 24); 1683 dev->dev_addr[0] = (unsigned char)((l & 0xFF000000) >> 24);
1714 dev->dev_addr[1] = (unsigned char)((l & 0x00FF0000) >> 16); 1684 dev->dev_addr[1] = (unsigned char)((l & 0x00FF0000) >> 16);
1715 dev->dev_addr[2] = (unsigned char)((l & 0x0000FF00) >> 8); 1685 dev->dev_addr[2] = (unsigned char)((l & 0x0000FF00) >> 8);
1716 dev->dev_addr[3] = (unsigned char)((l & 0x000000FF) >> 0); 1686 dev->dev_addr[3] = (unsigned char)((l & 0x000000FF) >> 0);
1717 l = fecp->fec_addr_high; 1687 l = readl(fep->hwp + FEC_ADDR_HIGH);
1718 dev->dev_addr[4] = (unsigned char)((l & 0xFF000000) >> 24); 1688 dev->dev_addr[4] = (unsigned char)((l & 0xFF000000) >> 24);
1719 dev->dev_addr[5] = (unsigned char)((l & 0x00FF0000) >> 16); 1689 dev->dev_addr[5] = (unsigned char)((l & 0x00FF0000) >> 16);
1720 } 1690 }
1721#endif 1691#endif
1722 1692
1723 cbd_base = (cbd_t *)mem_addr; 1693 /* Set receive and transmit descriptor base. */
1724
1725 /* Set receive and transmit descriptor base.
1726 */
1727 fep->rx_bd_base = cbd_base; 1694 fep->rx_bd_base = cbd_base;
1728 fep->tx_bd_base = cbd_base + RX_RING_SIZE; 1695 fep->tx_bd_base = cbd_base + RX_RING_SIZE;
1729 1696
1730 fep->dirty_tx = fep->cur_tx = fep->tx_bd_base;
1731 fep->cur_rx = fep->rx_bd_base;
1732
1733 fep->skb_cur = fep->skb_dirty = 0;
1734
1735 /* Initialize the receive buffer descriptors.
1736 */
1737 bdp = fep->rx_bd_base;
1738 for (i=0; i<FEC_ENET_RX_PAGES; i++) {
1739
1740 /* Allocate a page.
1741 */
1742 mem_addr = __get_free_page(GFP_KERNEL);
1743 /* XXX: missing check for allocation failure */
1744
1745 /* Initialize the BD for every fragment in the page.
1746 */
1747 for (j=0; j<FEC_ENET_RX_FRPPG; j++) {
1748 bdp->cbd_sc = BD_ENET_RX_EMPTY;
1749 bdp->cbd_bufaddr = __pa(mem_addr);
1750 mem_addr += FEC_ENET_RX_FRSIZE;
1751 bdp++;
1752 }
1753 }
1754
1755 /* Set the last buffer to wrap.
1756 */
1757 bdp--;
1758 bdp->cbd_sc |= BD_SC_WRAP;
1759
1760 /* ...and the same for transmmit.
1761 */
1762 bdp = fep->tx_bd_base;
1763 for (i=0, j=FEC_ENET_TX_FRPPG; i<TX_RING_SIZE; i++) {
1764 if (j >= FEC_ENET_TX_FRPPG) {
1765 mem_addr = __get_free_page(GFP_KERNEL);
1766 j = 1;
1767 } else {
1768 mem_addr += FEC_ENET_TX_FRSIZE;
1769 j++;
1770 }
1771 fep->tx_bounce[i] = (unsigned char *) mem_addr;
1772
1773 /* Initialize the BD for every fragment in the page.
1774 */
1775 bdp->cbd_sc = 0;
1776 bdp->cbd_bufaddr = 0;
1777 bdp++;
1778 }
1779
1780 /* Set the last buffer to wrap.
1781 */
1782 bdp--;
1783 bdp->cbd_sc |= BD_SC_WRAP;
1784
1785 /* Set receive and transmit descriptor base.
1786 */
1787 fecp->fec_r_des_start = fep->bd_dma;
1788 fecp->fec_x_des_start = (unsigned long)fep->bd_dma + sizeof(cbd_t)
1789 * RX_RING_SIZE;
1790
1791#ifdef HAVE_mii_link_interrupt 1697#ifdef HAVE_mii_link_interrupt
1792 fec_request_mii_intr(dev); 1698 fec_request_mii_intr(dev);
1793#endif 1699#endif
1794 1700 /* The FEC Ethernet specific entries in the device structure */
1795 fecp->fec_grp_hash_table_high = 0;
1796 fecp->fec_grp_hash_table_low = 0;
1797 fecp->fec_r_buff_size = PKT_MAXBLR_SIZE;
1798 fecp->fec_ecntrl = 2;
1799 fecp->fec_r_des_active = 0;
1800#ifndef CONFIG_M5272
1801 fecp->fec_hash_table_high = 0;
1802 fecp->fec_hash_table_low = 0;
1803#endif
1804
1805 /* The FEC Ethernet specific entries in the device structure. */
1806 dev->open = fec_enet_open;
1807 dev->hard_start_xmit = fec_enet_start_xmit;
1808 dev->tx_timeout = fec_timeout;
1809 dev->watchdog_timeo = TX_TIMEOUT; 1701 dev->watchdog_timeo = TX_TIMEOUT;
1810 dev->stop = fec_enet_close; 1702 dev->netdev_ops = &fec_netdev_ops;
1811 dev->set_multicast_list = set_multicast_list;
1812 1703
1813 for (i=0; i<NMII-1; i++) 1704 for (i=0; i<NMII-1; i++)
1814 mii_cmds[i].mii_next = &mii_cmds[i+1]; 1705 mii_cmds[i].mii_next = &mii_cmds[i+1];
1815 mii_free = mii_cmds; 1706 mii_free = mii_cmds;
1816 1707
1817 /* setup MII interface */ 1708 /* Set MII speed to 2.5 MHz */
1818 fecp->fec_r_cntrl = OPT_FRAME_SIZE | 0x04;
1819 fecp->fec_x_cntrl = 0x00;
1820
1821 /*
1822 * Set MII speed to 2.5 MHz
1823 */
1824 fep->phy_speed = ((((clk_get_rate(fep->clk) / 2 + 4999999) 1709 fep->phy_speed = ((((clk_get_rate(fep->clk) / 2 + 4999999)
1825 / 2500000) / 2) & 0x3F) << 1; 1710 / 2500000) / 2) & 0x3F) << 1;
1826 fecp->fec_mii_speed = fep->phy_speed;
1827 fec_restart(dev, 0); 1711 fec_restart(dev, 0);
1828 1712
1829 /* Clear and enable interrupts */
1830 fecp->fec_ievent = 0xffc00000;
1831 fecp->fec_imask = (FEC_ENET_TXF | FEC_ENET_RXF | FEC_ENET_MII);
1832
1833 /* Queue up command to detect the PHY and initialize the 1713 /* Queue up command to detect the PHY and initialize the
1834 * remainder of the interface. 1714 * remainder of the interface.
1835 */ 1715 */
@@ -1847,145 +1727,118 @@ int __init fec_enet_init(struct net_device *dev, int index)
1847static void 1727static void
1848fec_restart(struct net_device *dev, int duplex) 1728fec_restart(struct net_device *dev, int duplex)
1849{ 1729{
1850 struct fec_enet_private *fep; 1730 struct fec_enet_private *fep = netdev_priv(dev);
1851 volatile cbd_t *bdp; 1731 struct bufdesc *bdp;
1852 volatile fec_t *fecp;
1853 int i; 1732 int i;
1854 1733
1855 fep = netdev_priv(dev); 1734 /* Whack a reset. We should wait for this. */
1856 fecp = fep->hwp; 1735 writel(1, fep->hwp + FEC_ECNTRL);
1857
1858 /* Whack a reset. We should wait for this.
1859 */
1860 fecp->fec_ecntrl = 1;
1861 udelay(10); 1736 udelay(10);
1862 1737
1863 /* Clear any outstanding interrupt. 1738 /* Clear any outstanding interrupt. */
1864 */ 1739 writel(0xffc00000, fep->hwp + FEC_IEVENT);
1865 fecp->fec_ievent = 0xffc00000;
1866 1740
1867 /* Set station address. 1741 /* Reset all multicast. */
1868 */ 1742 writel(0, fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
1869 fec_set_mac_address(dev); 1743 writel(0, fep->hwp + FEC_GRP_HASH_TABLE_LOW);
1744#ifndef CONFIG_M5272
1745 writel(0, fep->hwp + FEC_HASH_TABLE_HIGH);
1746 writel(0, fep->hwp + FEC_HASH_TABLE_LOW);
1747#endif
1870 1748
1871 /* Reset all multicast. 1749 /* Set maximum receive buffer size. */
1872 */ 1750 writel(PKT_MAXBLR_SIZE, fep->hwp + FEC_R_BUFF_SIZE);
1873 fecp->fec_grp_hash_table_high = 0;
1874 fecp->fec_grp_hash_table_low = 0;
1875 1751
1876 /* Set maximum receive buffer size. 1752 /* Set receive and transmit descriptor base. */
1877 */ 1753 writel(fep->bd_dma, fep->hwp + FEC_R_DES_START);
1878 fecp->fec_r_buff_size = PKT_MAXBLR_SIZE; 1754 writel((unsigned long)fep->bd_dma + sizeof(struct bufdesc) * RX_RING_SIZE,
1879 1755 fep->hwp + FEC_X_DES_START);
1880 /* Set receive and transmit descriptor base.
1881 */
1882 fecp->fec_r_des_start = fep->bd_dma;
1883 fecp->fec_x_des_start = (unsigned long)fep->bd_dma + sizeof(cbd_t)
1884 * RX_RING_SIZE;
1885 1756
1886 fep->dirty_tx = fep->cur_tx = fep->tx_bd_base; 1757 fep->dirty_tx = fep->cur_tx = fep->tx_bd_base;
1887 fep->cur_rx = fep->rx_bd_base; 1758 fep->cur_rx = fep->rx_bd_base;
1888 1759
1889 /* Reset SKB transmit buffers. 1760 /* Reset SKB transmit buffers. */
1890 */
1891 fep->skb_cur = fep->skb_dirty = 0; 1761 fep->skb_cur = fep->skb_dirty = 0;
1892 for (i=0; i<=TX_RING_MOD_MASK; i++) { 1762 for (i = 0; i <= TX_RING_MOD_MASK; i++) {
1893 if (fep->tx_skbuff[i] != NULL) { 1763 if (fep->tx_skbuff[i]) {
1894 dev_kfree_skb_any(fep->tx_skbuff[i]); 1764 dev_kfree_skb_any(fep->tx_skbuff[i]);
1895 fep->tx_skbuff[i] = NULL; 1765 fep->tx_skbuff[i] = NULL;
1896 } 1766 }
1897 } 1767 }
1898 1768
1899 /* Initialize the receive buffer descriptors. 1769 /* Initialize the receive buffer descriptors. */
1900 */
1901 bdp = fep->rx_bd_base; 1770 bdp = fep->rx_bd_base;
1902 for (i=0; i<RX_RING_SIZE; i++) { 1771 for (i = 0; i < RX_RING_SIZE; i++) {
1903 1772
1904 /* Initialize the BD for every fragment in the page. 1773 /* Initialize the BD for every fragment in the page. */
1905 */
1906 bdp->cbd_sc = BD_ENET_RX_EMPTY; 1774 bdp->cbd_sc = BD_ENET_RX_EMPTY;
1907 bdp++; 1775 bdp++;
1908 } 1776 }
1909 1777
1910 /* Set the last buffer to wrap. 1778 /* Set the last buffer to wrap */
1911 */
1912 bdp--; 1779 bdp--;
1913 bdp->cbd_sc |= BD_SC_WRAP; 1780 bdp->cbd_sc |= BD_SC_WRAP;
1914 1781
1915 /* ...and the same for transmmit. 1782 /* ...and the same for transmit */
1916 */
1917 bdp = fep->tx_bd_base; 1783 bdp = fep->tx_bd_base;
1918 for (i=0; i<TX_RING_SIZE; i++) { 1784 for (i = 0; i < TX_RING_SIZE; i++) {
1919 1785
1920 /* Initialize the BD for every fragment in the page. 1786 /* Initialize the BD for every fragment in the page. */
1921 */
1922 bdp->cbd_sc = 0; 1787 bdp->cbd_sc = 0;
1923 bdp->cbd_bufaddr = 0; 1788 bdp->cbd_bufaddr = 0;
1924 bdp++; 1789 bdp++;
1925 } 1790 }
1926 1791
1927 /* Set the last buffer to wrap. 1792 /* Set the last buffer to wrap */
1928 */
1929 bdp--; 1793 bdp--;
1930 bdp->cbd_sc |= BD_SC_WRAP; 1794 bdp->cbd_sc |= BD_SC_WRAP;
1931 1795
1932 /* Enable MII mode. 1796 /* Enable MII mode */
1933 */
1934 if (duplex) { 1797 if (duplex) {
1935 fecp->fec_r_cntrl = OPT_FRAME_SIZE | 0x04;/* MII enable */ 1798 /* MII enable / FD enable */
1936 fecp->fec_x_cntrl = 0x04; /* FD enable */ 1799 writel(OPT_FRAME_SIZE | 0x04, fep->hwp + FEC_R_CNTRL);
1800 writel(0x04, fep->hwp + FEC_X_CNTRL);
1937 } else { 1801 } else {
1938 /* MII enable|No Rcv on Xmit */ 1802 /* MII enable / No Rcv on Xmit */
1939 fecp->fec_r_cntrl = OPT_FRAME_SIZE | 0x06; 1803 writel(OPT_FRAME_SIZE | 0x06, fep->hwp + FEC_R_CNTRL);
1940 fecp->fec_x_cntrl = 0x00; 1804 writel(0x0, fep->hwp + FEC_X_CNTRL);
1941 } 1805 }
1942 fep->full_duplex = duplex; 1806 fep->full_duplex = duplex;
1943 1807
1944 /* Set MII speed. 1808 /* Set MII speed */
1945 */ 1809 writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);
1946 fecp->fec_mii_speed = fep->phy_speed;
1947 1810
1948 /* And last, enable the transmit and receive processing. 1811 /* And last, enable the transmit and receive processing */
1949 */ 1812 writel(2, fep->hwp + FEC_ECNTRL);
1950 fecp->fec_ecntrl = 2; 1813 writel(0, fep->hwp + FEC_R_DES_ACTIVE);
1951 fecp->fec_r_des_active = 0;
1952 1814
1953 /* Enable interrupts we wish to service. 1815 /* Enable interrupts we wish to service */
1954 */ 1816 writel(FEC_ENET_TXF | FEC_ENET_RXF | FEC_ENET_MII,
1955 fecp->fec_imask = (FEC_ENET_TXF | FEC_ENET_RXF | FEC_ENET_MII); 1817 fep->hwp + FEC_IMASK);
1956} 1818}
1957 1819
1958static void 1820static void
1959fec_stop(struct net_device *dev) 1821fec_stop(struct net_device *dev)
1960{ 1822{
1961 volatile fec_t *fecp; 1823 struct fec_enet_private *fep = netdev_priv(dev);
1962 struct fec_enet_private *fep;
1963
1964 fep = netdev_priv(dev);
1965 fecp = fep->hwp;
1966 1824
1967 /* 1825 /* We cannot expect a graceful transmit stop without link !!! */
1968 ** We cannot expect a graceful transmit stop without link !!! 1826 if (fep->link) {
1969 */ 1827 writel(1, fep->hwp + FEC_X_CNTRL); /* Graceful transmit stop */
1970 if (fep->link)
1971 {
1972 fecp->fec_x_cntrl = 0x01; /* Graceful transmit stop */
1973 udelay(10); 1828 udelay(10);
1974 if (!(fecp->fec_ievent & FEC_ENET_GRA)) 1829 if (!(readl(fep->hwp + FEC_IEVENT) & FEC_ENET_GRA))
1975 printk("fec_stop : Graceful transmit stop did not complete !\n"); 1830 printk("fec_stop : Graceful transmit stop did not complete !\n");
1976 } 1831 }
1977 1832
1978 /* Whack a reset. We should wait for this. 1833 /* Whack a reset. We should wait for this. */
1979 */ 1834 writel(1, fep->hwp + FEC_ECNTRL);
1980 fecp->fec_ecntrl = 1;
1981 udelay(10); 1835 udelay(10);
1982 1836
1983 /* Clear outstanding MII command interrupts. 1837 /* Clear outstanding MII command interrupts. */
1984 */ 1838 writel(FEC_ENET_MII, fep->hwp + FEC_IEVENT);
1985 fecp->fec_ievent = FEC_ENET_MII;
1986 1839
1987 fecp->fec_imask = FEC_ENET_MII; 1840 writel(FEC_ENET_MII, fep->hwp + FEC_IMASK);
1988 fecp->fec_mii_speed = fep->phy_speed; 1841 writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);
1989} 1842}
1990 1843
1991static int __devinit 1844static int __devinit
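Taken together, the fec.c hunks above replace every dereference of the old volatile register struct with readl()/writel() against the ioremap'd base held in fep->hwp plus the register offsets now defined in fec.h; read-modify-write sequences such as the promiscuous-mode toggle become an explicit read, modify, write. A condensed sketch of that idiom follows; the helper name is hypothetical, while the register and bit are the ones used in set_multicast_list() above.

/* Old style:	fecp->fec_r_cntrl |= 0x0008;
 * New style:	explicit MMIO accessors on the mapped register window.
 */
static void fec_set_promisc_bit(struct fec_enet_private *fep)
{
	u32 rcntrl = readl(fep->hwp + FEC_R_CNTRL);

	rcntrl |= 0x8;				/* accept all incoming frames */
	writel(rcntrl, fep->hwp + FEC_R_CNTRL);
}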
diff --git a/drivers/net/fec.h b/drivers/net/fec.h
index 76c64c92e190..30b7dd671336 100644
--- a/drivers/net/fec.h
+++ b/drivers/net/fec.h
@@ -20,82 +20,55 @@
20 * registers in the same peripheral device on different models 20 * registers in the same peripheral device on different models
21 * of the ColdFire! 21 * of the ColdFire!
22 */ 22 */
23typedef struct fec { 23#define FEC_IEVENT 0x004 /* Interrupt event reg */
24 unsigned long fec_reserved0; 24#define FEC_IMASK 0x008 /* Interrupt mask reg */
25 unsigned long fec_ievent; /* Interrupt event reg */ 25#define FEC_R_DES_ACTIVE 0x010 /* Receive descriptor reg */
26 unsigned long fec_imask; /* Interrupt mask reg */ 26#define FEC_X_DES_ACTIVE 0x014 /* Transmit descriptor reg */
27 unsigned long fec_reserved1; 27#define FEC_ECNTRL 0x024 /* Ethernet control reg */
28 unsigned long fec_r_des_active; /* Receive descriptor reg */ 28#define FEC_MII_DATA 0x040 /* MII manage frame reg */
29 unsigned long fec_x_des_active; /* Transmit descriptor reg */ 29#define FEC_MII_SPEED 0x044 /* MII speed control reg */
30 unsigned long fec_reserved2[3]; 30#define FEC_MIB_CTRLSTAT 0x064 /* MIB control/status reg */
31 unsigned long fec_ecntrl; /* Ethernet control reg */ 31#define FEC_R_CNTRL 0x084 /* Receive control reg */
32 unsigned long fec_reserved3[6]; 32#define FEC_X_CNTRL 0x0c4 /* Transmit Control reg */
33 unsigned long fec_mii_data; /* MII manage frame reg */ 33#define FEC_ADDR_LOW 0x0e4 /* Low 32bits MAC address */
34 unsigned long fec_mii_speed; /* MII speed control reg */ 34#define FEC_ADDR_HIGH 0x0e8 /* High 16bits MAC address */
35 unsigned long fec_reserved4[7]; 35#define FEC_OPD 0x0ec /* Opcode + Pause duration */
36 unsigned long fec_mib_ctrlstat; /* MIB control/status reg */ 36#define FEC_HASH_TABLE_HIGH 0x118 /* High 32bits hash table */
37 unsigned long fec_reserved5[7]; 37#define FEC_HASH_TABLE_LOW 0x11c /* Low 32bits hash table */
38 unsigned long fec_r_cntrl; /* Receive control reg */ 38#define FEC_GRP_HASH_TABLE_HIGH 0x120 /* High 32bits hash table */
39 unsigned long fec_reserved6[15]; 39#define FEC_GRP_HASH_TABLE_LOW 0x124 /* Low 32bits hash table */
40 unsigned long fec_x_cntrl; /* Transmit Control reg */ 40#define FEC_X_WMRK 0x144 /* FIFO transmit water mark */
41 unsigned long fec_reserved7[7]; 41#define FEC_R_BOUND 0x14c /* FIFO receive bound reg */
42 unsigned long fec_addr_low; /* Low 32bits MAC address */ 42#define FEC_R_FSTART 0x150 /* FIFO receive start reg */
43 unsigned long fec_addr_high; /* High 16bits MAC address */ 43#define FEC_R_DES_START 0x180 /* Receive descriptor ring */
44 unsigned long fec_opd; /* Opcode + Pause duration */ 44#define FEC_X_DES_START 0x184 /* Transmit descriptor ring */
45 unsigned long fec_reserved8[10]; 45#define FEC_R_BUFF_SIZE 0x188 /* Maximum receive buff size */
46 unsigned long fec_hash_table_high; /* High 32bits hash table */
47 unsigned long fec_hash_table_low; /* Low 32bits hash table */
48 unsigned long fec_grp_hash_table_high;/* High 32bits hash table */
49 unsigned long fec_grp_hash_table_low; /* Low 32bits hash table */
50 unsigned long fec_reserved9[7];
51 unsigned long fec_x_wmrk; /* FIFO transmit water mark */
52 unsigned long fec_reserved10;
53 unsigned long fec_r_bound; /* FIFO receive bound reg */
54 unsigned long fec_r_fstart; /* FIFO receive start reg */
55 unsigned long fec_reserved11[11];
56 unsigned long fec_r_des_start; /* Receive descriptor ring */
57 unsigned long fec_x_des_start; /* Transmit descriptor ring */
58 unsigned long fec_r_buff_size; /* Maximum receive buff size */
59} fec_t;
60 46
61#else 47#else
62 48
 63/*	49#define FEC_ECNTRL		0x000 /* Ethernet control reg */
 64 * Define device register set address map.	50#define FEC_IEVENT		0x004 /* Interrupt event reg */
 65 */	51#define FEC_IMASK		0x008 /* Interrupt mask reg */
 66typedef struct fec {	52#define FEC_IVEC		0x00c /* Interrupt vec status reg */
 67 unsigned long fec_ecntrl; /* Ethernet control reg */	53#define FEC_R_DES_ACTIVE	0x010 /* Receive descriptor reg */
 68 unsigned long fec_ievent; /* Interrupt even reg */	54#define FEC_X_DES_ACTIVE	0x01c /* Transmit descriptor reg */
69 unsigned long fec_imask; /* Interrupt mask reg */ 55#define FEC_MII_DATA 0x040 /* MII manage frame reg */
70 unsigned long fec_ivec; /* Interrupt vec status reg */ 56#define FEC_MII_SPEED 0x044 /* MII speed control reg */
71 unsigned long fec_r_des_active; /* Receive descriptor reg */ 57#define FEC_R_BOUND 0x08c /* FIFO receive bound reg */
72 unsigned long fec_x_des_active; /* Transmit descriptor reg */ 58#define FEC_R_FSTART 0x090 /* FIFO receive start reg */
73 unsigned long fec_reserved1[10]; 59#define FEC_X_WMRK 0x0a4 /* FIFO transmit water mark */
74 unsigned long fec_mii_data; /* MII manage frame reg */ 60#define FEC_X_FSTART 0x0ac /* FIFO transmit start reg */
75 unsigned long fec_mii_speed; /* MII speed control reg */ 61#define FEC_R_CNTRL 0x104 /* Receive control reg */
76 unsigned long fec_reserved2[17]; 62#define FEC_MAX_FRM_LEN 0x108 /* Maximum frame length reg */
77 unsigned long fec_r_bound; /* FIFO receive bound reg */ 63#define FEC_X_CNTRL 0x144 /* Transmit Control reg */
78 unsigned long fec_r_fstart; /* FIFO receive start reg */ 64#define FEC_ADDR_LOW 0x3c0 /* Low 32bits MAC address */
79 unsigned long fec_reserved3[4]; 65#define FEC_ADDR_HIGH 0x3c4 /* High 16bits MAC address */
80 unsigned long fec_x_wmrk; /* FIFO transmit water mark */ 66#define FEC_GRP_HASH_TABLE_HIGH 0x3c8 /* High 32bits hash table */
81 unsigned long fec_reserved4; 67#define FEC_GRP_HASH_TABLE_LOW 0x3cc /* Low 32bits hash table */
82 unsigned long fec_x_fstart; /* FIFO transmit start reg */ 68#define FEC_R_DES_START 0x3d0 /* Receive descriptor ring */
83 unsigned long fec_reserved5[21]; 69#define FEC_X_DES_START 0x3d4 /* Transmit descriptor ring */
84 unsigned long fec_r_cntrl; /* Receive control reg */ 70#define FEC_R_BUFF_SIZE 0x3d8 /* Maximum receive buff size */
85 unsigned long fec_max_frm_len; /* Maximum frame length reg */ 71#define FEC_FIFO_RAM 0x400 /* FIFO RAM buffer */
86 unsigned long fec_reserved6[14];
87 unsigned long fec_x_cntrl; /* Transmit Control reg */
88 unsigned long fec_reserved7[158];
89 unsigned long fec_addr_low; /* Low 32bits MAC address */
90 unsigned long fec_addr_high; /* High 16bits MAC address */
91 unsigned long fec_grp_hash_table_high;/* High 32bits hash table */
92 unsigned long fec_grp_hash_table_low; /* Low 32bits hash table */
93 unsigned long fec_r_des_start; /* Receive descriptor ring */
94 unsigned long fec_x_des_start; /* Transmit descriptor ring */
95 unsigned long fec_r_buff_size; /* Maximum receive buff size */
96 unsigned long reserved8[9];
97 unsigned long fec_fifo_ram[112]; /* FIFO RAM buffer */
98} fec_t;
99 72
100#endif /* CONFIG_M5272 */ 73#endif /* CONFIG_M5272 */
101 74
@@ -104,17 +77,17 @@ typedef struct fec {
104 * Define the buffer descriptor structure. 77 * Define the buffer descriptor structure.
105 */ 78 */
106#ifdef CONFIG_ARCH_MXC 79#ifdef CONFIG_ARCH_MXC
107typedef struct bufdesc { 80struct bufdesc {
108 unsigned short cbd_datlen; /* Data length */ 81 unsigned short cbd_datlen; /* Data length */
109 unsigned short cbd_sc; /* Control and status info */ 82 unsigned short cbd_sc; /* Control and status info */
110 unsigned long cbd_bufaddr; /* Buffer address */ 83 unsigned long cbd_bufaddr; /* Buffer address */
111} cbd_t; 84};
112#else 85#else
113typedef struct bufdesc { 86struct bufdesc {
114 unsigned short cbd_sc; /* Control and status info */ 87 unsigned short cbd_sc; /* Control and status info */
115 unsigned short cbd_datlen; /* Data length */ 88 unsigned short cbd_datlen; /* Data length */
116 unsigned long cbd_bufaddr; /* Buffer address */ 89 unsigned long cbd_bufaddr; /* Buffer address */
117} cbd_t; 90};
118#endif 91#endif
119 92
120/* 93/*
diff --git a/drivers/net/fs_enet/mii-fec.c b/drivers/net/fs_enet/mii-fec.c
index 28077cc1b949..61aaae444b40 100644
--- a/drivers/net/fs_enet/mii-fec.c
+++ b/drivers/net/fs_enet/mii-fec.c
@@ -54,8 +54,7 @@ static int fs_enet_fec_mii_read(struct mii_bus *bus , int phy_id, int location)
54 fec_t __iomem *fecp = fec->fecp; 54 fec_t __iomem *fecp = fec->fecp;
55 int i, ret = -1; 55 int i, ret = -1;
56 56
57 if ((in_be32(&fecp->fec_r_cntrl) & FEC_RCNTRL_MII_MODE) == 0) 57 BUG_ON((in_be32(&fecp->fec_r_cntrl) & FEC_RCNTRL_MII_MODE) == 0);
58 BUG();
59 58
60 /* Add PHY address to register command. */ 59 /* Add PHY address to register command. */
61 out_be32(&fecp->fec_mii_data, (phy_id << 23) | mk_mii_read(location)); 60 out_be32(&fecp->fec_mii_data, (phy_id << 23) | mk_mii_read(location));
@@ -79,8 +78,7 @@ static int fs_enet_fec_mii_write(struct mii_bus *bus, int phy_id, int location,
79 int i; 78 int i;
80 79
81 /* this must never happen */ 80 /* this must never happen */
82 if ((in_be32(&fecp->fec_r_cntrl) & FEC_RCNTRL_MII_MODE) == 0) 81 BUG_ON((in_be32(&fecp->fec_r_cntrl) & FEC_RCNTRL_MII_MODE) == 0);
83 BUG();
84 82
85 /* Add PHY address to register command. */ 83 /* Add PHY address to register command. */
86 out_be32(&fecp->fec_mii_data, (phy_id << 23) | mk_mii_write(location, val)); 84 out_be32(&fecp->fec_mii_data, (phy_id << 23) | mk_mii_write(location, val));
diff --git a/drivers/net/hplance.c b/drivers/net/hplance.c
index 2e802634d366..3e3528ade259 100644
--- a/drivers/net/hplance.c
+++ b/drivers/net/hplance.c
@@ -71,6 +71,19 @@ static struct dio_driver hplance_driver = {
71 .remove = __devexit_p(hplance_remove_one), 71 .remove = __devexit_p(hplance_remove_one),
72}; 72};
73 73
74static const struct net_device_ops hplance_netdev_ops = {
75 .ndo_open = hplance_open,
76 .ndo_stop = hplance_close,
77 .ndo_start_xmit = lance_start_xmit,
78 .ndo_set_multicast_list = lance_set_multicast,
79 .ndo_change_mtu = eth_change_mtu,
80 .ndo_validate_addr = eth_validate_addr,
81 .ndo_set_mac_address = eth_mac_addr,
82#ifdef CONFIG_NET_POLL_CONTROLLER
83 .ndo_poll_controller = lance_poll,
84#endif
85};
86
74/* Find all the HP Lance boards and initialise them... */ 87/* Find all the HP Lance boards and initialise them... */
75static int __devinit hplance_init_one(struct dio_dev *d, 88static int __devinit hplance_init_one(struct dio_dev *d,
76 const struct dio_device_id *ent) 89 const struct dio_device_id *ent)
@@ -135,13 +148,7 @@ static void __init hplance_init(struct net_device *dev, struct dio_dev *d)
135 148
136 /* Fill the dev fields */ 149 /* Fill the dev fields */
137 dev->base_addr = va; 150 dev->base_addr = va;
138 dev->open = &hplance_open; 151 dev->netdev_ops = &hplance_netdev_ops;
139 dev->stop = &hplance_close;
140#ifdef CONFIG_NET_POLL_CONTROLLER
141 dev->poll_controller = lance_poll;
142#endif
143 dev->hard_start_xmit = &lance_start_xmit;
144 dev->set_multicast_list = &lance_set_multicast;
145 dev->dma = 0; 152 dev->dma = 0;
146 153
147 for (i=0; i<6; i++) { 154 for (i=0; i<6; i++) {
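The hplance hunk above, and the ibmveth, irda and iseries_veth hunks that follow, are all the same mechanical conversion: the individual function pointers that used to live on struct net_device are gathered into one const struct net_device_ops which the probe/init path assigns in a single step. A minimal sketch of the pattern for a hypothetical driver is below; all foo_* names are placeholders, not functions from any of these drivers.

#include <linux/netdevice.h>
#include <linux/etherdevice.h>

static const struct net_device_ops foo_netdev_ops = {
	.ndo_open		= foo_open,
	.ndo_stop		= foo_close,
	.ndo_start_xmit		= foo_start_xmit,
	.ndo_set_multicast_list	= foo_set_multicast,
	.ndo_change_mtu		= eth_change_mtu,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= eth_mac_addr,
};

static void foo_setup(struct net_device *dev)
{
	/* replaces the old dev->open = ..., dev->stop = ..., etc. assignments */
	dev->netdev_ops = &foo_netdev_ops;
}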
diff --git a/drivers/net/ibmveth.c b/drivers/net/ibmveth.c
index 5c6315df86b9..0a51b0bd1e49 100644
--- a/drivers/net/ibmveth.c
+++ b/drivers/net/ibmveth.c
@@ -1203,6 +1203,20 @@ static unsigned long ibmveth_get_desired_dma(struct vio_dev *vdev)
1203 return ret; 1203 return ret;
1204} 1204}
1205 1205
1206static const struct net_device_ops ibmveth_netdev_ops = {
1207 .ndo_open = ibmveth_open,
1208 .ndo_stop = ibmveth_close,
1209 .ndo_start_xmit = ibmveth_start_xmit,
1210 .ndo_set_multicast_list = ibmveth_set_multicast_list,
1211 .ndo_do_ioctl = ibmveth_ioctl,
1212 .ndo_change_mtu = ibmveth_change_mtu,
1213 .ndo_validate_addr = eth_validate_addr,
1214 .ndo_set_mac_address = eth_mac_addr,
1215#ifdef CONFIG_NET_POLL_CONTROLLER
1216 .ndo_poll_controller = ibmveth_poll_controller,
1217#endif
1218};
1219
1206static int __devinit ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id) 1220static int __devinit ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id)
1207{ 1221{
1208 int rc, i; 1222 int rc, i;
@@ -1265,17 +1279,9 @@ static int __devinit ibmveth_probe(struct vio_dev *dev, const struct vio_device_
1265 memcpy(&adapter->mac_addr, mac_addr_p, 6); 1279 memcpy(&adapter->mac_addr, mac_addr_p, 6);
1266 1280
1267 netdev->irq = dev->irq; 1281 netdev->irq = dev->irq;
1268 netdev->open = ibmveth_open; 1282 netdev->netdev_ops = &ibmveth_netdev_ops;
1269 netdev->stop = ibmveth_close; 1283 netdev->ethtool_ops = &netdev_ethtool_ops;
1270 netdev->hard_start_xmit = ibmveth_start_xmit;
1271 netdev->set_multicast_list = ibmveth_set_multicast_list;
1272 netdev->do_ioctl = ibmveth_ioctl;
1273 netdev->ethtool_ops = &netdev_ethtool_ops;
1274 netdev->change_mtu = ibmveth_change_mtu;
1275 SET_NETDEV_DEV(netdev, &dev->dev); 1284 SET_NETDEV_DEV(netdev, &dev->dev);
1276#ifdef CONFIG_NET_POLL_CONTROLLER
1277 netdev->poll_controller = ibmveth_poll_controller;
1278#endif
1279 netdev->features |= NETIF_F_LLTX; 1285 netdev->features |= NETIF_F_LLTX;
1280 spin_lock_init(&adapter->stats_lock); 1286 spin_lock_init(&adapter->stats_lock);
1281 1287
diff --git a/drivers/net/igb/igb_main.c b/drivers/net/igb/igb_main.c
index 08c801490c72..183235d46aee 100644
--- a/drivers/net/igb/igb_main.c
+++ b/drivers/net/igb/igb_main.c
@@ -942,6 +942,8 @@ int igb_up(struct igb_adapter *adapter)
942 rd32(E1000_ICR); 942 rd32(E1000_ICR);
943 igb_irq_enable(adapter); 943 igb_irq_enable(adapter);
944 944
945 netif_tx_start_all_queues(adapter->netdev);
946
945 /* Fire a link change interrupt to start the watchdog. */ 947 /* Fire a link change interrupt to start the watchdog. */
946 wr32(E1000_ICS, E1000_ICS_LSC); 948 wr32(E1000_ICS, E1000_ICS_LSC);
947 return 0; 949 return 0;
@@ -1442,15 +1444,14 @@ static int __devinit igb_probe(struct pci_dev *pdev,
1442 * driver. */ 1444 * driver. */
1443 igb_get_hw_control(adapter); 1445 igb_get_hw_control(adapter);
1444 1446
1445 /* tell the stack to leave us alone until igb_open() is called */
1446 netif_carrier_off(netdev);
1447 netif_tx_stop_all_queues(netdev);
1448
1449 strcpy(netdev->name, "eth%d"); 1447 strcpy(netdev->name, "eth%d");
1450 err = register_netdev(netdev); 1448 err = register_netdev(netdev);
1451 if (err) 1449 if (err)
1452 goto err_register; 1450 goto err_register;
1453 1451
1452 /* carrier off reporting is important to ethtool even BEFORE open */
1453 netif_carrier_off(netdev);
1454
1454#ifdef CONFIG_IGB_DCA 1455#ifdef CONFIG_IGB_DCA
1455 if (dca_add_requester(&pdev->dev) == 0) { 1456 if (dca_add_requester(&pdev->dev) == 0) {
1456 adapter->flags |= IGB_FLAG_DCA_ENABLED; 1457 adapter->flags |= IGB_FLAG_DCA_ENABLED;
@@ -1699,6 +1700,8 @@ static int igb_open(struct net_device *netdev)
1699 if (test_bit(__IGB_TESTING, &adapter->state)) 1700 if (test_bit(__IGB_TESTING, &adapter->state))
1700 return -EBUSY; 1701 return -EBUSY;
1701 1702
1703 netif_carrier_off(netdev);
1704
1702 /* allocate transmit descriptors */ 1705 /* allocate transmit descriptors */
1703 err = igb_setup_all_tx_resources(adapter); 1706 err = igb_setup_all_tx_resources(adapter);
1704 if (err) 1707 if (err)
@@ -2663,7 +2666,6 @@ static void igb_watchdog_task(struct work_struct *work)
2663 } 2666 }
2664 2667
2665 netif_carrier_on(netdev); 2668 netif_carrier_on(netdev);
2666 netif_tx_wake_all_queues(netdev);
2667 2669
2668 igb_ping_all_vfs(adapter); 2670 igb_ping_all_vfs(adapter);
2669 2671
@@ -2680,7 +2682,6 @@ static void igb_watchdog_task(struct work_struct *work)
2680 printk(KERN_INFO "igb: %s NIC Link is Down\n", 2682 printk(KERN_INFO "igb: %s NIC Link is Down\n",
2681 netdev->name); 2683 netdev->name);
2682 netif_carrier_off(netdev); 2684 netif_carrier_off(netdev);
2683 netif_tx_stop_all_queues(netdev);
2684 2685
2685 igb_ping_all_vfs(adapter); 2686 igb_ping_all_vfs(adapter);
2686 2687
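The igb change above (and the matching ixgb hunks further down) stops micromanaging the TX queues around link transitions: netif_carrier_off() is called right after register_netdev() and again at the top of ->open(), so ethtool reports the correct link state even before the device is opened, and queue start/stop then follows the carrier state via the link watchdog instead of explicit netif_tx_stop/wake_all_queues() calls. In outline, the probe-time ordering the patch establishes is the following (the error label is the driver's own):

	err = register_netdev(netdev);
	if (err)
		goto err_register;

	/* carrier off reporting is important to ethtool even BEFORE open */
	netif_carrier_off(netdev);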
diff --git a/drivers/net/irda/au1k_ir.c b/drivers/net/irda/au1k_ir.c
index 941164076a2b..269153eedd26 100644
--- a/drivers/net/irda/au1k_ir.c
+++ b/drivers/net/irda/au1k_ir.c
@@ -23,6 +23,7 @@
23#include <linux/init.h> 23#include <linux/init.h>
24#include <linux/errno.h> 24#include <linux/errno.h>
25#include <linux/netdevice.h> 25#include <linux/netdevice.h>
26#include <linux/etherdevice.h>
26#include <linux/slab.h> 27#include <linux/slab.h>
27#include <linux/rtnetlink.h> 28#include <linux/rtnetlink.h>
28#include <linux/interrupt.h> 29#include <linux/interrupt.h>
@@ -198,6 +199,17 @@ static int au1k_irda_init_iobuf(iobuff_t *io, int size)
198 return io->head ? 0 : -ENOMEM; 199 return io->head ? 0 : -ENOMEM;
199} 200}
200 201
202static const struct net_device_ops au1k_irda_netdev_ops = {
203 .ndo_open = au1k_irda_start,
204 .ndo_stop = au1k_irda_stop,
205 .ndo_start_xmit = au1k_irda_hard_xmit,
206 .ndo_tx_timeout = au1k_tx_timeout,
207 .ndo_do_ioctl = au1k_irda_ioctl,
208 .ndo_change_mtu = eth_change_mtu,
209 .ndo_validate_addr = eth_validate_addr,
210 .ndo_set_mac_address = eth_mac_addr,
211};
212
201static int au1k_irda_net_init(struct net_device *dev) 213static int au1k_irda_net_init(struct net_device *dev)
202{ 214{
203 struct au1k_private *aup = netdev_priv(dev); 215 struct au1k_private *aup = netdev_priv(dev);
@@ -209,11 +221,7 @@ static int au1k_irda_net_init(struct net_device *dev)
209 if (err) 221 if (err)
210 goto out1; 222 goto out1;
211 223
212 dev->open = au1k_irda_start; 224 dev->netdev_ops = &au1k_irda_netdev_ops;
213 dev->hard_start_xmit = au1k_irda_hard_xmit;
214 dev->stop = au1k_irda_stop;
215 dev->do_ioctl = au1k_irda_ioctl;
216 dev->tx_timeout = au1k_tx_timeout;
217 225
218 irda_init_max_qos_capabilies(&aup->qos); 226 irda_init_max_qos_capabilies(&aup->qos);
219 227
diff --git a/drivers/net/irda/pxaficp_ir.c b/drivers/net/irda/pxaficp_ir.c
index e775338b525f..3376a4f39e0a 100644
--- a/drivers/net/irda/pxaficp_ir.c
+++ b/drivers/net/irda/pxaficp_ir.c
@@ -14,6 +14,7 @@
14 */ 14 */
15#include <linux/module.h> 15#include <linux/module.h>
16#include <linux/netdevice.h> 16#include <linux/netdevice.h>
17#include <linux/etherdevice.h>
17#include <linux/platform_device.h> 18#include <linux/platform_device.h>
18#include <linux/clk.h> 19#include <linux/clk.h>
19 20
@@ -797,6 +798,16 @@ static int pxa_irda_init_iobuf(iobuff_t *io, int size)
797 return io->head ? 0 : -ENOMEM; 798 return io->head ? 0 : -ENOMEM;
798} 799}
799 800
801static const struct net_device_ops pxa_irda_netdev_ops = {
802 .ndo_open = pxa_irda_start,
803 .ndo_stop = pxa_irda_stop,
804 .ndo_start_xmit = pxa_irda_hard_xmit,
805 .ndo_do_ioctl = pxa_irda_ioctl,
806 .ndo_change_mtu = eth_change_mtu,
807 .ndo_validate_addr = eth_validate_addr,
808 .ndo_set_mac_address = eth_mac_addr,
809};
810
800static int pxa_irda_probe(struct platform_device *pdev) 811static int pxa_irda_probe(struct platform_device *pdev)
801{ 812{
802 struct net_device *dev; 813 struct net_device *dev;
@@ -845,10 +856,7 @@ static int pxa_irda_probe(struct platform_device *pdev)
845 if (err) 856 if (err)
846 goto err_startup; 857 goto err_startup;
847 858
848 dev->hard_start_xmit = pxa_irda_hard_xmit; 859 dev->netdev_ops = &pxa_irda_netdev_ops;
849 dev->open = pxa_irda_start;
850 dev->stop = pxa_irda_stop;
851 dev->do_ioctl = pxa_irda_ioctl;
852 860
853 irda_init_max_qos_capabilies(&si->qos); 861 irda_init_max_qos_capabilies(&si->qos);
854 862
diff --git a/drivers/net/irda/sa1100_ir.c b/drivers/net/irda/sa1100_ir.c
index 7a2b003954ca..2aeb2e6aec1b 100644
--- a/drivers/net/irda/sa1100_ir.c
+++ b/drivers/net/irda/sa1100_ir.c
@@ -24,6 +24,7 @@
24#include <linux/init.h> 24#include <linux/init.h>
25#include <linux/errno.h> 25#include <linux/errno.h>
26#include <linux/netdevice.h> 26#include <linux/netdevice.h>
27#include <linux/etherdevice.h>
27#include <linux/slab.h> 28#include <linux/slab.h>
28#include <linux/rtnetlink.h> 29#include <linux/rtnetlink.h>
29#include <linux/interrupt.h> 30#include <linux/interrupt.h>
@@ -875,6 +876,16 @@ static int sa1100_irda_init_iobuf(iobuff_t *io, int size)
875 return io->head ? 0 : -ENOMEM; 876 return io->head ? 0 : -ENOMEM;
876} 877}
877 878
879static const struct net_device_ops sa1100_irda_netdev_ops = {
880 .ndo_open = sa1100_irda_start,
881 .ndo_stop = sa1100_irda_stop,
882 .ndo_start_xmit = sa1100_irda_hard_xmit,
883 .ndo_do_ioctl = sa1100_irda_ioctl,
884 .ndo_change_mtu = eth_change_mtu,
885 .ndo_validate_addr = eth_validate_addr,
886 .ndo_set_mac_address = eth_mac_addr,
887};
888
878static int sa1100_irda_probe(struct platform_device *pdev) 889static int sa1100_irda_probe(struct platform_device *pdev)
879{ 890{
880 struct net_device *dev; 891 struct net_device *dev;
@@ -913,11 +924,8 @@ static int sa1100_irda_probe(struct platform_device *pdev)
913 if (err) 924 if (err)
914 goto err_mem_5; 925 goto err_mem_5;
915 926
916 dev->hard_start_xmit = sa1100_irda_hard_xmit; 927 dev->netdev_ops = &sa1100_irda_netdev_ops;
917 dev->open = sa1100_irda_start; 928 dev->irq = IRQ_Ser2ICP;
918 dev->stop = sa1100_irda_stop;
919 dev->do_ioctl = sa1100_irda_ioctl;
920 dev->irq = IRQ_Ser2ICP;
921 929
922 irda_init_max_qos_capabilies(&si->qos); 930 irda_init_max_qos_capabilies(&si->qos);
923 931
diff --git a/drivers/net/iseries_veth.c b/drivers/net/iseries_veth.c
index cb793c2bade2..e44215cb1882 100644
--- a/drivers/net/iseries_veth.c
+++ b/drivers/net/iseries_veth.c
@@ -1021,6 +1021,16 @@ static const struct ethtool_ops ops = {
1021 .get_link = veth_get_link, 1021 .get_link = veth_get_link,
1022}; 1022};
1023 1023
1024static const struct net_device_ops veth_netdev_ops = {
1025 .ndo_open = veth_open,
1026 .ndo_stop = veth_close,
1027 .ndo_start_xmit = veth_start_xmit,
1028 .ndo_change_mtu = veth_change_mtu,
1029 .ndo_set_multicast_list = veth_set_multicast_list,
1030 .ndo_set_mac_address = NULL,
1031 .ndo_validate_addr = eth_validate_addr,
1032};
1033
1024static struct net_device *veth_probe_one(int vlan, 1034static struct net_device *veth_probe_one(int vlan,
1025 struct vio_dev *vio_dev) 1035 struct vio_dev *vio_dev)
1026{ 1036{
@@ -1067,12 +1077,7 @@ static struct net_device *veth_probe_one(int vlan,
1067 1077
1068 memcpy(&port->mac_addr, mac_addr, ETH_ALEN); 1078 memcpy(&port->mac_addr, mac_addr, ETH_ALEN);
1069 1079
1070 dev->open = veth_open; 1080 dev->netdev_ops = &veth_netdev_ops;
1071 dev->hard_start_xmit = veth_start_xmit;
1072 dev->stop = veth_close;
1073 dev->change_mtu = veth_change_mtu;
1074 dev->set_mac_address = NULL;
1075 dev->set_multicast_list = veth_set_multicast_list;
1076 SET_ETHTOOL_OPS(dev, &ops); 1081 SET_ETHTOOL_OPS(dev, &ops);
1077 1082
1078 SET_NETDEV_DEV(dev, vdev); 1083 SET_NETDEV_DEV(dev, vdev);
diff --git a/drivers/net/ixgb/ixgb_main.c b/drivers/net/ixgb/ixgb_main.c
index 4a0826b8f6f2..cb9ecc48f6d0 100644
--- a/drivers/net/ixgb/ixgb_main.c
+++ b/drivers/net/ixgb/ixgb_main.c
@@ -266,6 +266,8 @@ ixgb_up(struct ixgb_adapter *adapter)
266 napi_enable(&adapter->napi); 266 napi_enable(&adapter->napi);
267 ixgb_irq_enable(adapter); 267 ixgb_irq_enable(adapter);
268 268
269 netif_wake_queue(netdev);
270
269 mod_timer(&adapter->watchdog_timer, jiffies); 271 mod_timer(&adapter->watchdog_timer, jiffies);
270 272
271 return 0; 273 return 0;
@@ -471,10 +473,8 @@ ixgb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
471 if (err) 473 if (err)
472 goto err_register; 474 goto err_register;
473 475
474 /* we're going to reset, so assume we have no link for now */ 476 /* carrier off reporting is important to ethtool even BEFORE open */
475
476 netif_carrier_off(netdev); 477 netif_carrier_off(netdev);
477 netif_stop_queue(netdev);
478 478
479 DPRINTK(PROBE, INFO, "Intel(R) PRO/10GbE Network Connection\n"); 479 DPRINTK(PROBE, INFO, "Intel(R) PRO/10GbE Network Connection\n");
480 ixgb_check_options(adapter); 480 ixgb_check_options(adapter);
@@ -592,6 +592,8 @@ ixgb_open(struct net_device *netdev)
592 if (err) 592 if (err)
593 goto err_setup_tx; 593 goto err_setup_tx;
594 594
595 netif_carrier_off(netdev);
596
595 /* allocate receive descriptors */ 597 /* allocate receive descriptors */
596 598
597 err = ixgb_setup_rx_resources(adapter); 599 err = ixgb_setup_rx_resources(adapter);
@@ -602,6 +604,8 @@ ixgb_open(struct net_device *netdev)
602 if (err) 604 if (err)
603 goto err_up; 605 goto err_up;
604 606
607 netif_start_queue(netdev);
608
605 return 0; 609 return 0;
606 610
607err_up: 611err_up:
@@ -1116,7 +1120,6 @@ ixgb_watchdog(unsigned long data)
1116 adapter->link_speed = 10000; 1120 adapter->link_speed = 10000;
1117 adapter->link_duplex = FULL_DUPLEX; 1121 adapter->link_duplex = FULL_DUPLEX;
1118 netif_carrier_on(netdev); 1122 netif_carrier_on(netdev);
1119 netif_wake_queue(netdev);
1120 } 1123 }
1121 } else { 1124 } else {
1122 if (netif_carrier_ok(netdev)) { 1125 if (netif_carrier_ok(netdev)) {
@@ -1125,8 +1128,6 @@ ixgb_watchdog(unsigned long data)
1125 printk(KERN_INFO "ixgb: %s NIC Link is Down\n", 1128 printk(KERN_INFO "ixgb: %s NIC Link is Down\n",
1126 netdev->name); 1129 netdev->name);
1127 netif_carrier_off(netdev); 1130 netif_carrier_off(netdev);
1128 netif_stop_queue(netdev);
1129
1130 } 1131 }
1131 } 1132 }
1132 1133
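
Taken together, the ixgb hunks move carrier and queue management out of the link watchdog: the carrier is declared down right after register_netdev() (so ethtool sees no link before the first open), again at the top of open, and the transmit queue is started from open/up instead of being woken and stopped as the link changes. A sketch of the resulting split, with the hardware bring-up elided (hypothetical foo_* names and a stand-in link_is_up() check):

static int foo_open(struct net_device *netdev)
{
        /* carrier stays off until the watchdog observes link-up */
        netif_carrier_off(netdev);

        /* ... allocate Tx/Rx resources, request the IRQ, enable the NIC ... */

        /* queue state is now tied to open/close, not to link flaps */
        netif_start_queue(netdev);
        return 0;
}

static void foo_watchdog(unsigned long data)
{
        struct net_device *netdev = (struct net_device *)data;

        if (link_is_up(netdev))
                netif_carrier_on(netdev);       /* no netif_wake_queue() here any more */
        else
                netif_carrier_off(netdev);      /* and no netif_stop_queue() either */
}
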
diff --git a/drivers/net/ixgb/ixgb_osdep.h b/drivers/net/ixgb/ixgb_osdep.h
index d92e72bd627a..371a6be4d965 100644
--- a/drivers/net/ixgb/ixgb_osdep.h
+++ b/drivers/net/ixgb/ixgb_osdep.h
@@ -40,7 +40,7 @@
40#include <linux/sched.h> 40#include <linux/sched.h>
41 41
42#undef ASSERT 42#undef ASSERT
43#define ASSERT(x) if (!(x)) BUG() 43#define ASSERT(x) BUG_ON(!(x))
44#define MSGOUT(S, A, B) printk(KERN_DEBUG S "\n", A, B) 44#define MSGOUT(S, A, B) printk(KERN_DEBUG S "\n", A, B)
45 45
46#ifdef DBG 46#ifdef DBG
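
The ASSERT rewrite is about macro hygiene rather than behaviour: "if (!(x)) BUG()" is not a single statement, so an un-braced if/else around the macro misparses (the else binds to the macro's own if), while BUG_ON(!(x)) expands to one self-contained statement. A contrived fragment showing the hazard the old form allows (hypothetical names):

#define ASSERT_OLD(x)  if (!(x)) BUG()
#define ASSERT_NEW(x)  BUG_ON(!(x))

if (tx_ring)
        ASSERT_OLD(tx_ring->desc);      /* expands to: if (!(tx_ring->desc)) BUG(); */
else
        setup_without_ring();           /* this else binds to the macro's inner if,
                                         * so it runs when desc is valid and never
                                         * runs when tx_ring itself is NULL */
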
diff --git a/drivers/net/ixgbe/ixgbe_82598.c b/drivers/net/ixgbe/ixgbe_82598.c
index 4791238c3f6e..03eb54f4f1cc 100644
--- a/drivers/net/ixgbe/ixgbe_82598.c
+++ b/drivers/net/ixgbe/ixgbe_82598.c
@@ -75,18 +75,49 @@ static u16 ixgbe_get_pcie_msix_count_82598(struct ixgbe_hw *hw)
75static s32 ixgbe_get_invariants_82598(struct ixgbe_hw *hw) 75static s32 ixgbe_get_invariants_82598(struct ixgbe_hw *hw)
76{ 76{
77 struct ixgbe_mac_info *mac = &hw->mac; 77 struct ixgbe_mac_info *mac = &hw->mac;
78
79 /* Call PHY identify routine to get the phy type */
80 ixgbe_identify_phy_generic(hw);
81
82 mac->mcft_size = IXGBE_82598_MC_TBL_SIZE;
83 mac->vft_size = IXGBE_82598_VFT_TBL_SIZE;
84 mac->num_rar_entries = IXGBE_82598_RAR_ENTRIES;
85 mac->max_rx_queues = IXGBE_82598_MAX_RX_QUEUES;
86 mac->max_tx_queues = IXGBE_82598_MAX_TX_QUEUES;
87 mac->max_msix_vectors = ixgbe_get_pcie_msix_count_82598(hw);
88
89 return 0;
90}
91
92/**
93 * ixgbe_init_phy_ops_82598 - PHY/SFP specific init
94 * @hw: pointer to hardware structure
95 *
96 * Initialize any function pointers that were not able to be
97 * set during get_invariants because the PHY/SFP type was
98 * not known. Perform the SFP init if necessary.
99 *
100 **/
101s32 ixgbe_init_phy_ops_82598(struct ixgbe_hw *hw)
102{
103 struct ixgbe_mac_info *mac = &hw->mac;
78 struct ixgbe_phy_info *phy = &hw->phy; 104 struct ixgbe_phy_info *phy = &hw->phy;
79 s32 ret_val = 0; 105 s32 ret_val = 0;
80 u16 list_offset, data_offset; 106 u16 list_offset, data_offset;
81 107
82 /* Set the bus information prior to PHY identification */ 108 /* Identify the PHY */
83 mac->ops.get_bus_info(hw); 109 phy->ops.identify(hw);
84 110
85 /* Call PHY identify routine to get the phy type */ 111 /* Overwrite the link function pointers if copper PHY */
86 ixgbe_identify_phy_generic(hw); 112 if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper) {
113 mac->ops.setup_link = &ixgbe_setup_copper_link_82598;
114 mac->ops.setup_link_speed =
115 &ixgbe_setup_copper_link_speed_82598;
116 mac->ops.get_link_capabilities =
117 &ixgbe_get_copper_link_capabilities_82598;
118 }
87 119
88 /* PHY Init */ 120 switch (hw->phy.type) {
89 switch (phy->type) {
90 case ixgbe_phy_tn: 121 case ixgbe_phy_tn:
91 phy->ops.check_link = &ixgbe_check_phy_link_tnx; 122 phy->ops.check_link = &ixgbe_check_phy_link_tnx;
92 phy->ops.get_firmware_version = 123 phy->ops.get_firmware_version =
@@ -106,8 +137,8 @@ static s32 ixgbe_get_invariants_82598(struct ixgbe_hw *hw)
106 137
107 /* Check to see if SFP+ module is supported */ 138 /* Check to see if SFP+ module is supported */
108 ret_val = ixgbe_get_sfp_init_sequence_offsets(hw, 139 ret_val = ixgbe_get_sfp_init_sequence_offsets(hw,
109 &list_offset, 140 &list_offset,
110 &data_offset); 141 &data_offset);
111 if (ret_val != 0) { 142 if (ret_val != 0) {
112 ret_val = IXGBE_ERR_SFP_NOT_SUPPORTED; 143 ret_val = IXGBE_ERR_SFP_NOT_SUPPORTED;
113 goto out; 144 goto out;
@@ -117,21 +148,6 @@ static s32 ixgbe_get_invariants_82598(struct ixgbe_hw *hw)
117 break; 148 break;
118 } 149 }
119 150
120 if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper) {
121 mac->ops.setup_link = &ixgbe_setup_copper_link_82598;
122 mac->ops.setup_link_speed =
123 &ixgbe_setup_copper_link_speed_82598;
124 mac->ops.get_link_capabilities =
125 &ixgbe_get_copper_link_capabilities_82598;
126 }
127
128 mac->mcft_size = IXGBE_82598_MC_TBL_SIZE;
129 mac->vft_size = IXGBE_82598_VFT_TBL_SIZE;
130 mac->num_rar_entries = IXGBE_82598_RAR_ENTRIES;
131 mac->max_rx_queues = IXGBE_82598_MAX_RX_QUEUES;
132 mac->max_tx_queues = IXGBE_82598_MAX_TX_QUEUES;
133 mac->max_msix_vectors = ixgbe_get_pcie_msix_count_82598(hw);
134
135out: 151out:
136 return ret_val; 152 return ret_val;
137} 153}
@@ -149,12 +165,19 @@ static s32 ixgbe_get_link_capabilities_82598(struct ixgbe_hw *hw,
149 bool *autoneg) 165 bool *autoneg)
150{ 166{
151 s32 status = 0; 167 s32 status = 0;
168 u32 autoc = 0;
152 169
153 /* 170 /*
154 * Determine link capabilities based on the stored value of AUTOC, 171 * Determine link capabilities based on the stored value of AUTOC,
155 * which represents EEPROM defaults. 172 * which represents EEPROM defaults. If AUTOC value has not been
173 * stored, use the current register value.
156 */ 174 */
157 switch (hw->mac.orig_autoc & IXGBE_AUTOC_LMS_MASK) { 175 if (hw->mac.orig_link_settings_stored)
176 autoc = hw->mac.orig_autoc;
177 else
178 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
179
180 switch (autoc & IXGBE_AUTOC_LMS_MASK) {
158 case IXGBE_AUTOC_LMS_1G_LINK_NO_AN: 181 case IXGBE_AUTOC_LMS_1G_LINK_NO_AN:
159 *speed = IXGBE_LINK_SPEED_1GB_FULL; 182 *speed = IXGBE_LINK_SPEED_1GB_FULL;
160 *autoneg = false; 183 *autoneg = false;
@@ -173,9 +196,9 @@ static s32 ixgbe_get_link_capabilities_82598(struct ixgbe_hw *hw,
173 case IXGBE_AUTOC_LMS_KX4_AN: 196 case IXGBE_AUTOC_LMS_KX4_AN:
174 case IXGBE_AUTOC_LMS_KX4_AN_1G_AN: 197 case IXGBE_AUTOC_LMS_KX4_AN_1G_AN:
175 *speed = IXGBE_LINK_SPEED_UNKNOWN; 198 *speed = IXGBE_LINK_SPEED_UNKNOWN;
176 if (hw->mac.orig_autoc & IXGBE_AUTOC_KX4_SUPP) 199 if (autoc & IXGBE_AUTOC_KX4_SUPP)
177 *speed |= IXGBE_LINK_SPEED_10GB_FULL; 200 *speed |= IXGBE_LINK_SPEED_10GB_FULL;
178 if (hw->mac.orig_autoc & IXGBE_AUTOC_KX_SUPP) 201 if (autoc & IXGBE_AUTOC_KX_SUPP)
179 *speed |= IXGBE_LINK_SPEED_1GB_FULL; 202 *speed |= IXGBE_LINK_SPEED_1GB_FULL;
180 *autoneg = true; 203 *autoneg = true;
181 break; 204 break;
@@ -322,6 +345,7 @@ static s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw, s32 packetbuf_num)
322 } 345 }
323 346
324 /* Enable 802.3x based flow control settings. */ 347 /* Enable 802.3x based flow control settings. */
348 fctrl_reg |= IXGBE_FCTRL_DPF;
325 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl_reg); 349 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl_reg);
326 IXGBE_WRITE_REG(hw, IXGBE_RMCS, rmcs_reg); 350 IXGBE_WRITE_REG(hw, IXGBE_RMCS, rmcs_reg);
327 351
@@ -380,9 +404,11 @@ static s32 ixgbe_setup_fc_82598(struct ixgbe_hw *hw, s32 packetbuf_num)
380 * because it causes the controller to just blast out fc packets. 404 * because it causes the controller to just blast out fc packets.
381 */ 405 */
382 if (!hw->fc.low_water || !hw->fc.high_water || !hw->fc.pause_time) { 406 if (!hw->fc.low_water || !hw->fc.high_water || !hw->fc.pause_time) {
383 hw_dbg(hw, "Invalid water mark configuration\n"); 407 if (hw->fc.requested_mode != ixgbe_fc_none) {
384 ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS; 408 hw_dbg(hw, "Invalid water mark configuration\n");
385 goto out; 409 ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
410 goto out;
411 }
386 } 412 }
387 413
388 /* 414 /*
@@ -716,14 +742,23 @@ static s32 ixgbe_reset_hw_82598(struct ixgbe_hw *hw)
716 } 742 }
717 743
718 /* Reset PHY */ 744 /* Reset PHY */
719 if (hw->phy.reset_disable == false) 745 if (hw->phy.reset_disable == false) {
746 /* PHY ops must be identified and initialized prior to reset */
747
748 /* Init PHY and function pointers, perform SFP setup */
749 status = hw->phy.ops.init(hw);
750 if (status == IXGBE_ERR_SFP_NOT_SUPPORTED)
751 goto reset_hw_out;
752
720 hw->phy.ops.reset(hw); 753 hw->phy.ops.reset(hw);
754 }
721 755
722 /* 756 /*
723 * Prevent the PCI-E bus from from hanging by disabling PCI-E master 757 * Prevent the PCI-E bus from from hanging by disabling PCI-E master
724 * access and verify no pending requests before reset 758 * access and verify no pending requests before reset
725 */ 759 */
726 if (ixgbe_disable_pcie_master(hw) != 0) { 760 status = ixgbe_disable_pcie_master(hw);
761 if (status != 0) {
727 status = IXGBE_ERR_MASTER_REQUESTS_PENDING; 762 status = IXGBE_ERR_MASTER_REQUESTS_PENDING;
728 hw_dbg(hw, "PCI-E Master disable polling has failed.\n"); 763 hw_dbg(hw, "PCI-E Master disable polling has failed.\n");
729 } 764 }
@@ -770,6 +805,7 @@ static s32 ixgbe_reset_hw_82598(struct ixgbe_hw *hw)
770 /* Store the permanent mac address */ 805 /* Store the permanent mac address */
771 hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr); 806 hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr);
772 807
808reset_hw_out:
773 return status; 809 return status;
774} 810}
775 811
@@ -998,35 +1034,56 @@ out:
998static u32 ixgbe_get_supported_physical_layer_82598(struct ixgbe_hw *hw) 1034static u32 ixgbe_get_supported_physical_layer_82598(struct ixgbe_hw *hw)
999{ 1035{
1000 u32 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN; 1036 u32 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
1037 u32 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
1038 u32 pma_pmd_10g = autoc & IXGBE_AUTOC_10G_PMA_PMD_MASK;
1039 u32 pma_pmd_1g = autoc & IXGBE_AUTOC_1G_PMA_PMD_MASK;
1040 u16 ext_ability = 0;
1041
1042 hw->phy.ops.identify(hw);
1043
1044 /* Copper PHY must be checked before AUTOC LMS to determine correct
1045 * physical layer because 10GBase-T PHYs use LMS = KX4/KX */
1046 if (hw->phy.type == ixgbe_phy_tn ||
1047 hw->phy.type == ixgbe_phy_cu_unknown) {
1048 hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_EXT_ABILITY,
1049 IXGBE_MDIO_PMA_PMD_DEV_TYPE, &ext_ability);
1050 if (ext_ability & IXGBE_MDIO_PHY_10GBASET_ABILITY)
1051 physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_T;
1052 if (ext_ability & IXGBE_MDIO_PHY_1000BASET_ABILITY)
1053 physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_T;
1054 if (ext_ability & IXGBE_MDIO_PHY_100BASETX_ABILITY)
1055 physical_layer |= IXGBE_PHYSICAL_LAYER_100BASE_TX;
1056 goto out;
1057 }
1001 1058
1002 switch (hw->device_id) { 1059 switch (autoc & IXGBE_AUTOC_LMS_MASK) {
1003 case IXGBE_DEV_ID_82598: 1060 case IXGBE_AUTOC_LMS_1G_AN:
1004 /* Default device ID is mezzanine card KX/KX4 */ 1061 case IXGBE_AUTOC_LMS_1G_LINK_NO_AN:
1005 physical_layer = (IXGBE_PHYSICAL_LAYER_10GBASE_KX4 | 1062 if (pma_pmd_1g == IXGBE_AUTOC_1G_KX)
1006 IXGBE_PHYSICAL_LAYER_1000BASE_KX); 1063 physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_KX;
1007 break; 1064 else
1008 case IXGBE_DEV_ID_82598_BX: 1065 physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_BX;
1009 physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_BX;
1010 case IXGBE_DEV_ID_82598EB_CX4:
1011 case IXGBE_DEV_ID_82598_CX4_DUAL_PORT:
1012 physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_CX4;
1013 break;
1014 case IXGBE_DEV_ID_82598_DA_DUAL_PORT:
1015 physical_layer = IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU;
1016 break; 1066 break;
1017 case IXGBE_DEV_ID_82598AF_DUAL_PORT: 1067 case IXGBE_AUTOC_LMS_10G_LINK_NO_AN:
1018 case IXGBE_DEV_ID_82598AF_SINGLE_PORT: 1068 if (pma_pmd_10g == IXGBE_AUTOC_10G_CX4)
1019 case IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM: 1069 physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_CX4;
1020 physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_SR; 1070 else if (pma_pmd_10g == IXGBE_AUTOC_10G_KX4)
1071 physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KX4;
1072 else /* XAUI */
1073 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
1021 break; 1074 break;
1022 case IXGBE_DEV_ID_82598EB_XF_LR: 1075 case IXGBE_AUTOC_LMS_KX4_AN:
1023 physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_LR; 1076 case IXGBE_AUTOC_LMS_KX4_AN_1G_AN:
1077 if (autoc & IXGBE_AUTOC_KX_SUPP)
1078 physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_KX;
1079 if (autoc & IXGBE_AUTOC_KX4_SUPP)
1080 physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_KX4;
1024 break; 1081 break;
1025 case IXGBE_DEV_ID_82598AT: 1082 default:
1026 physical_layer = (IXGBE_PHYSICAL_LAYER_10GBASE_T |
1027 IXGBE_PHYSICAL_LAYER_1000BASE_T);
1028 break; 1083 break;
1029 case IXGBE_DEV_ID_82598EB_SFP_LOM: 1084 }
1085
1086 if (hw->phy.type == ixgbe_phy_nl) {
1030 hw->phy.ops.identify_sfp(hw); 1087 hw->phy.ops.identify_sfp(hw);
1031 1088
1032 switch (hw->phy.sfp_type) { 1089 switch (hw->phy.sfp_type) {
@@ -1043,13 +1100,25 @@ static u32 ixgbe_get_supported_physical_layer_82598(struct ixgbe_hw *hw)
1043 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN; 1100 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
1044 break; 1101 break;
1045 } 1102 }
1046 break; 1103 }
1047 1104
1105 switch (hw->device_id) {
1106 case IXGBE_DEV_ID_82598_DA_DUAL_PORT:
1107 physical_layer = IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU;
1108 break;
1109 case IXGBE_DEV_ID_82598AF_DUAL_PORT:
1110 case IXGBE_DEV_ID_82598AF_SINGLE_PORT:
1111 case IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM:
1112 physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_SR;
1113 break;
1114 case IXGBE_DEV_ID_82598EB_XF_LR:
1115 physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_LR;
1116 break;
1048 default: 1117 default:
1049 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
1050 break; 1118 break;
1051 } 1119 }
1052 1120
1121out:
1053 return physical_layer; 1122 return physical_layer;
1054} 1123}
1055 1124
@@ -1099,6 +1168,7 @@ static struct ixgbe_eeprom_operations eeprom_ops_82598 = {
1099static struct ixgbe_phy_operations phy_ops_82598 = { 1168static struct ixgbe_phy_operations phy_ops_82598 = {
1100 .identify = &ixgbe_identify_phy_generic, 1169 .identify = &ixgbe_identify_phy_generic,
1101 .identify_sfp = &ixgbe_identify_sfp_module_generic, 1170 .identify_sfp = &ixgbe_identify_sfp_module_generic,
1171 .init = &ixgbe_init_phy_ops_82598,
1102 .reset = &ixgbe_reset_phy_generic, 1172 .reset = &ixgbe_reset_phy_generic,
1103 .read_reg = &ixgbe_read_phy_reg_generic, 1173 .read_reg = &ixgbe_read_phy_reg_generic,
1104 .write_reg = &ixgbe_write_phy_reg_generic, 1174 .write_reg = &ixgbe_write_phy_reg_generic,
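
Both get_link_capabilities hunks (this one and its 82599 counterpart below) stop trusting hw->mac.orig_autoc unconditionally and instead pick the AUTOC source explicitly: the stored EEPROM default when one was captured, otherwise the live register. Isolated from the surrounding switch statement, the selection is simply:

u32 autoc;

/*
 * Use the AUTOC value stored at init time (EEPROM defaults) when it
 * exists; fall back to the current register contents otherwise.
 */
if (hw->mac.orig_link_settings_stored)
        autoc = hw->mac.orig_autoc;
else
        autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);

/* every later KX / KX4 / KR capability test then checks autoc, not orig_autoc */

ixgbe_setup_mac_link_speed_82599() keeps both values around: it still reads the live register into autoc for the write-back path, while consulting orig_autoc (with the same fallback) only for the KR/KX4/KX support bits.
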
diff --git a/drivers/net/ixgbe/ixgbe_82599.c b/drivers/net/ixgbe/ixgbe_82599.c
index 29771fbaa42d..9e824b450416 100644
--- a/drivers/net/ixgbe/ixgbe_82599.c
+++ b/drivers/net/ixgbe/ixgbe_82599.c
@@ -100,6 +100,9 @@ s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw)
100 100
101 if (hw->phy.sfp_type != ixgbe_sfp_type_unknown) { 101 if (hw->phy.sfp_type != ixgbe_sfp_type_unknown) {
102 ixgbe_init_mac_link_ops_82599(hw); 102 ixgbe_init_mac_link_ops_82599(hw);
103
104 hw->phy.ops.reset = NULL;
105
103 ret_val = ixgbe_get_sfp_init_sequence_offsets(hw, &list_offset, 106 ret_val = ixgbe_get_sfp_init_sequence_offsets(hw, &list_offset,
104 &data_offset); 107 &data_offset);
105 108
@@ -146,51 +149,60 @@ u32 ixgbe_get_pcie_msix_count_82599(struct ixgbe_hw *hw)
146static s32 ixgbe_get_invariants_82599(struct ixgbe_hw *hw) 149static s32 ixgbe_get_invariants_82599(struct ixgbe_hw *hw)
147{ 150{
148 struct ixgbe_mac_info *mac = &hw->mac; 151 struct ixgbe_mac_info *mac = &hw->mac;
149 struct ixgbe_phy_info *phy = &hw->phy;
150 s32 ret_val;
151 152
152 /* Set the bus information prior to PHY identification */ 153 ixgbe_init_mac_link_ops_82599(hw);
153 mac->ops.get_bus_info(hw);
154 154
155 /* Call PHY identify routine to get the Cu or SFI phy type */ 155 mac->mcft_size = IXGBE_82599_MC_TBL_SIZE;
156 ret_val = phy->ops.identify(hw); 156 mac->vft_size = IXGBE_82599_VFT_TBL_SIZE;
157 mac->num_rar_entries = IXGBE_82599_RAR_ENTRIES;
158 mac->max_rx_queues = IXGBE_82599_MAX_RX_QUEUES;
159 mac->max_tx_queues = IXGBE_82599_MAX_TX_QUEUES;
160 mac->max_msix_vectors = ixgbe_get_pcie_msix_count_82599(hw);
157 161
158 if (ret_val == IXGBE_ERR_SFP_NOT_SUPPORTED) 162 return 0;
159 goto get_invariants_out; 163}
160 164
161 ixgbe_init_mac_link_ops_82599(hw); 165/**
166 * ixgbe_init_phy_ops_82599 - PHY/SFP specific init
167 * @hw: pointer to hardware structure
168 *
169 * Initialize any function pointers that were not able to be
170 * set during get_invariants because the PHY/SFP type was
171 * not known. Perform the SFP init if necessary.
172 *
173 **/
174s32 ixgbe_init_phy_ops_82599(struct ixgbe_hw *hw)
175{
176 struct ixgbe_mac_info *mac = &hw->mac;
177 struct ixgbe_phy_info *phy = &hw->phy;
178 s32 ret_val = 0;
162 179
163 /* Setup SFP module if there is one present. */ 180 /* Identify the PHY or SFP module */
164 ret_val = mac->ops.setup_sfp(hw); 181 ret_val = phy->ops.identify(hw);
182
183 /* Setup function pointers based on detected SFP module and speeds */
184 ixgbe_init_mac_link_ops_82599(hw);
165 185
166 /* If copper media, overwrite with copper function pointers */ 186 /* If copper media, overwrite with copper function pointers */
167 if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper) { 187 if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper) {
168 mac->ops.setup_link = &ixgbe_setup_copper_link_82599; 188 mac->ops.setup_link = &ixgbe_setup_copper_link_82599;
169 mac->ops.setup_link_speed = 189 mac->ops.setup_link_speed =
170 &ixgbe_setup_copper_link_speed_82599; 190 &ixgbe_setup_copper_link_speed_82599;
171 mac->ops.get_link_capabilities = 191 mac->ops.get_link_capabilities =
172 &ixgbe_get_copper_link_capabilities_82599; 192 &ixgbe_get_copper_link_capabilities_82599;
173 } 193 }
174 194
175 /* PHY Init */ 195 /* Set necessary function pointers based on phy type */
176 switch (hw->phy.type) { 196 switch (hw->phy.type) {
177 case ixgbe_phy_tn: 197 case ixgbe_phy_tn:
178 phy->ops.check_link = &ixgbe_check_phy_link_tnx; 198 phy->ops.check_link = &ixgbe_check_phy_link_tnx;
179 phy->ops.get_firmware_version = 199 phy->ops.get_firmware_version =
180 &ixgbe_get_phy_firmware_version_tnx; 200 &ixgbe_get_phy_firmware_version_tnx;
181 break; 201 break;
182 default: 202 default:
183 break; 203 break;
184 } 204 }
185 205
186 mac->mcft_size = IXGBE_82599_MC_TBL_SIZE;
187 mac->vft_size = IXGBE_82599_VFT_TBL_SIZE;
188 mac->num_rar_entries = IXGBE_82599_RAR_ENTRIES;
189 mac->max_rx_queues = IXGBE_82599_MAX_RX_QUEUES;
190 mac->max_tx_queues = IXGBE_82599_MAX_TX_QUEUES;
191 mac->max_msix_vectors = ixgbe_get_pcie_msix_count_82599(hw);
192
193get_invariants_out:
194 return ret_val; 206 return ret_val;
195} 207}
196 208
@@ -207,8 +219,19 @@ s32 ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw,
207 bool *negotiation) 219 bool *negotiation)
208{ 220{
209 s32 status = 0; 221 s32 status = 0;
222 u32 autoc = 0;
223
224 /*
225 * Determine link capabilities based on the stored value of AUTOC,
226 * which represents EEPROM defaults. If AUTOC value has not been
227 * stored, use the current register value.
228 */
229 if (hw->mac.orig_link_settings_stored)
230 autoc = hw->mac.orig_autoc;
231 else
232 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
210 233
211 switch (hw->mac.orig_autoc & IXGBE_AUTOC_LMS_MASK) { 234 switch (autoc & IXGBE_AUTOC_LMS_MASK) {
212 case IXGBE_AUTOC_LMS_1G_LINK_NO_AN: 235 case IXGBE_AUTOC_LMS_1G_LINK_NO_AN:
213 *speed = IXGBE_LINK_SPEED_1GB_FULL; 236 *speed = IXGBE_LINK_SPEED_1GB_FULL;
214 *negotiation = false; 237 *negotiation = false;
@@ -232,22 +255,22 @@ s32 ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw,
232 case IXGBE_AUTOC_LMS_KX4_KX_KR: 255 case IXGBE_AUTOC_LMS_KX4_KX_KR:
233 case IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN: 256 case IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN:
234 *speed = IXGBE_LINK_SPEED_UNKNOWN; 257 *speed = IXGBE_LINK_SPEED_UNKNOWN;
235 if (hw->mac.orig_autoc & IXGBE_AUTOC_KR_SUPP) 258 if (autoc & IXGBE_AUTOC_KR_SUPP)
236 *speed |= IXGBE_LINK_SPEED_10GB_FULL; 259 *speed |= IXGBE_LINK_SPEED_10GB_FULL;
237 if (hw->mac.orig_autoc & IXGBE_AUTOC_KX4_SUPP) 260 if (autoc & IXGBE_AUTOC_KX4_SUPP)
238 *speed |= IXGBE_LINK_SPEED_10GB_FULL; 261 *speed |= IXGBE_LINK_SPEED_10GB_FULL;
239 if (hw->mac.orig_autoc & IXGBE_AUTOC_KX_SUPP) 262 if (autoc & IXGBE_AUTOC_KX_SUPP)
240 *speed |= IXGBE_LINK_SPEED_1GB_FULL; 263 *speed |= IXGBE_LINK_SPEED_1GB_FULL;
241 *negotiation = true; 264 *negotiation = true;
242 break; 265 break;
243 266
244 case IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII: 267 case IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII:
245 *speed = IXGBE_LINK_SPEED_100_FULL; 268 *speed = IXGBE_LINK_SPEED_100_FULL;
246 if (hw->mac.orig_autoc & IXGBE_AUTOC_KR_SUPP) 269 if (autoc & IXGBE_AUTOC_KR_SUPP)
247 *speed |= IXGBE_LINK_SPEED_10GB_FULL; 270 *speed |= IXGBE_LINK_SPEED_10GB_FULL;
248 if (hw->mac.orig_autoc & IXGBE_AUTOC_KX4_SUPP) 271 if (autoc & IXGBE_AUTOC_KX4_SUPP)
249 *speed |= IXGBE_LINK_SPEED_10GB_FULL; 272 *speed |= IXGBE_LINK_SPEED_10GB_FULL;
250 if (hw->mac.orig_autoc & IXGBE_AUTOC_KX_SUPP) 273 if (autoc & IXGBE_AUTOC_KX_SUPP)
251 *speed |= IXGBE_LINK_SPEED_1GB_FULL; 274 *speed |= IXGBE_LINK_SPEED_1GB_FULL;
252 *negotiation = true; 275 *negotiation = true;
253 break; 276 break;
@@ -558,6 +581,7 @@ s32 ixgbe_setup_mac_link_speed_82599(struct ixgbe_hw *hw,
558 s32 status = 0; 581 s32 status = 0;
559 u32 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC); 582 u32 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
560 u32 autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2); 583 u32 autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
584 u32 orig_autoc = 0;
561 u32 link_mode = autoc & IXGBE_AUTOC_LMS_MASK; 585 u32 link_mode = autoc & IXGBE_AUTOC_LMS_MASK;
562 u32 pma_pmd_1g = autoc & IXGBE_AUTOC_1G_PMA_PMD_MASK; 586 u32 pma_pmd_1g = autoc & IXGBE_AUTOC_1G_PMA_PMD_MASK;
563 u32 pma_pmd_10g_serial = autoc2 & IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_MASK; 587 u32 pma_pmd_10g_serial = autoc2 & IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_MASK;
@@ -569,6 +593,13 @@ s32 ixgbe_setup_mac_link_speed_82599(struct ixgbe_hw *hw,
569 hw->mac.ops.get_link_capabilities(hw, &link_capabilities, &autoneg); 593 hw->mac.ops.get_link_capabilities(hw, &link_capabilities, &autoneg);
570 speed &= link_capabilities; 594 speed &= link_capabilities;
571 595
596 /* Use stored value (EEPROM defaults) of AUTOC to find KR/KX4 support*/
597 if (hw->mac.orig_link_settings_stored)
598 orig_autoc = hw->mac.orig_autoc;
599 else
600 orig_autoc = autoc;
601
602
572 if (speed == IXGBE_LINK_SPEED_UNKNOWN) { 603 if (speed == IXGBE_LINK_SPEED_UNKNOWN) {
573 status = IXGBE_ERR_LINK_SETUP; 604 status = IXGBE_ERR_LINK_SETUP;
574 } else if (link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR || 605 } else if (link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR ||
@@ -577,9 +608,9 @@ s32 ixgbe_setup_mac_link_speed_82599(struct ixgbe_hw *hw,
577 /* Set KX4/KX/KR support according to speed requested */ 608 /* Set KX4/KX/KR support according to speed requested */
578 autoc &= ~(IXGBE_AUTOC_KX4_KX_SUPP_MASK | IXGBE_AUTOC_KR_SUPP); 609 autoc &= ~(IXGBE_AUTOC_KX4_KX_SUPP_MASK | IXGBE_AUTOC_KR_SUPP);
579 if (speed & IXGBE_LINK_SPEED_10GB_FULL) 610 if (speed & IXGBE_LINK_SPEED_10GB_FULL)
580 if (hw->mac.orig_autoc & IXGBE_AUTOC_KX4_SUPP) 611 if (orig_autoc & IXGBE_AUTOC_KX4_SUPP)
581 autoc |= IXGBE_AUTOC_KX4_SUPP; 612 autoc |= IXGBE_AUTOC_KX4_SUPP;
582 if (hw->mac.orig_autoc & IXGBE_AUTOC_KR_SUPP) 613 if (orig_autoc & IXGBE_AUTOC_KR_SUPP)
583 autoc |= IXGBE_AUTOC_KR_SUPP; 614 autoc |= IXGBE_AUTOC_KR_SUPP;
584 if (speed & IXGBE_LINK_SPEED_1GB_FULL) 615 if (speed & IXGBE_LINK_SPEED_1GB_FULL)
585 autoc |= IXGBE_AUTOC_KX_SUPP; 616 autoc |= IXGBE_AUTOC_KX_SUPP;
@@ -705,14 +736,30 @@ s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw)
705 /* Call adapter stop to disable tx/rx and clear interrupts */ 736 /* Call adapter stop to disable tx/rx and clear interrupts */
706 hw->mac.ops.stop_adapter(hw); 737 hw->mac.ops.stop_adapter(hw);
707 738
739 /* PHY ops must be identified and initialized prior to reset */
740
741 /* Init PHY and function pointers, perform SFP setup */
742 status = hw->phy.ops.init(hw);
743
744 if (status == IXGBE_ERR_SFP_NOT_SUPPORTED)
745 goto reset_hw_out;
746
747 /* Setup SFP module if there is one present. */
748 if (hw->phy.sfp_setup_needed) {
749 status = hw->mac.ops.setup_sfp(hw);
750 hw->phy.sfp_setup_needed = false;
751 }
752
708 /* Reset PHY */ 753 /* Reset PHY */
709 hw->phy.ops.reset(hw); 754 if (hw->phy.reset_disable == false && hw->phy.ops.reset != NULL)
755 hw->phy.ops.reset(hw);
710 756
711 /* 757 /*
712 * Prevent the PCI-E bus from from hanging by disabling PCI-E master 758 * Prevent the PCI-E bus from from hanging by disabling PCI-E master
713 * access and verify no pending requests before reset 759 * access and verify no pending requests before reset
714 */ 760 */
715 if (ixgbe_disable_pcie_master(hw) != 0) { 761 status = ixgbe_disable_pcie_master(hw);
762 if (status != 0) {
716 status = IXGBE_ERR_MASTER_REQUESTS_PENDING; 763 status = IXGBE_ERR_MASTER_REQUESTS_PENDING;
717 hw_dbg(hw, "PCI-E Master disable polling has failed.\n"); 764 hw_dbg(hw, "PCI-E Master disable polling has failed.\n");
718 } 765 }
@@ -773,6 +820,7 @@ s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw)
773 /* Store the permanent mac address */ 820 /* Store the permanent mac address */
774 hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr); 821 hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr);
775 822
823reset_hw_out:
776 return status; 824 return status;
777} 825}
778 826
@@ -1093,53 +1141,98 @@ s32 ixgbe_identify_phy_82599(struct ixgbe_hw *hw)
1093u32 ixgbe_get_supported_physical_layer_82599(struct ixgbe_hw *hw) 1141u32 ixgbe_get_supported_physical_layer_82599(struct ixgbe_hw *hw)
1094{ 1142{
1095 u32 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN; 1143 u32 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
1144 u32 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
1145 u32 autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
1146 u32 pma_pmd_10g_serial = autoc2 & IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_MASK;
1147 u32 pma_pmd_10g_parallel = autoc & IXGBE_AUTOC_10G_PMA_PMD_MASK;
1148 u32 pma_pmd_1g = autoc & IXGBE_AUTOC_1G_PMA_PMD_MASK;
1149 u16 ext_ability = 0;
1096 u8 comp_codes_10g = 0; 1150 u8 comp_codes_10g = 0;
1097 1151
1098 switch (hw->device_id) { 1152 hw->phy.ops.identify(hw);
1099 case IXGBE_DEV_ID_82599: 1153
1100 case IXGBE_DEV_ID_82599_KX4: 1154 if (hw->phy.type == ixgbe_phy_tn ||
1101 /* Default device ID is mezzanine card KX/KX4 */ 1155 hw->phy.type == ixgbe_phy_cu_unknown) {
1102 physical_layer = (IXGBE_PHYSICAL_LAYER_10GBASE_KX4 | 1156 hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_EXT_ABILITY,
1103 IXGBE_PHYSICAL_LAYER_1000BASE_KX); 1157 IXGBE_MDIO_PMA_PMD_DEV_TYPE, &ext_ability);
1158 if (ext_ability & IXGBE_MDIO_PHY_10GBASET_ABILITY)
1159 physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_T;
1160 if (ext_ability & IXGBE_MDIO_PHY_1000BASET_ABILITY)
1161 physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_T;
1162 if (ext_ability & IXGBE_MDIO_PHY_100BASETX_ABILITY)
1163 physical_layer |= IXGBE_PHYSICAL_LAYER_100BASE_TX;
1164 goto out;
1165 }
1166
1167 switch (autoc & IXGBE_AUTOC_LMS_MASK) {
1168 case IXGBE_AUTOC_LMS_1G_AN:
1169 case IXGBE_AUTOC_LMS_1G_LINK_NO_AN:
1170 if (pma_pmd_1g == IXGBE_AUTOC_1G_KX_BX) {
1171 physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_KX |
1172 IXGBE_PHYSICAL_LAYER_1000BASE_BX;
1173 goto out;
1174 } else
1175 /* SFI mode so read SFP module */
1176 goto sfp_check;
1104 break; 1177 break;
1105 case IXGBE_DEV_ID_82599_SFP: 1178 case IXGBE_AUTOC_LMS_10G_LINK_NO_AN:
1106 hw->phy.ops.identify_sfp(hw); 1179 if (pma_pmd_10g_parallel == IXGBE_AUTOC_10G_CX4)
1180 physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_CX4;
1181 else if (pma_pmd_10g_parallel == IXGBE_AUTOC_10G_KX4)
1182 physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KX4;
1183 goto out;
1184 break;
1185 case IXGBE_AUTOC_LMS_10G_SERIAL:
1186 if (pma_pmd_10g_serial == IXGBE_AUTOC2_10G_KR) {
1187 physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KR;
1188 goto out;
1189 } else if (pma_pmd_10g_serial == IXGBE_AUTOC2_10G_SFI)
1190 goto sfp_check;
1191 break;
1192 case IXGBE_AUTOC_LMS_KX4_KX_KR:
1193 case IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN:
1194 if (autoc & IXGBE_AUTOC_KX_SUPP)
1195 physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_KX;
1196 if (autoc & IXGBE_AUTOC_KX4_SUPP)
1197 physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_KX4;
1198 if (autoc & IXGBE_AUTOC_KR_SUPP)
1199 physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_KR;
1200 goto out;
1201 break;
1202 default:
1203 goto out;
1204 break;
1205 }
1107 1206
1108 switch (hw->phy.sfp_type) { 1207sfp_check:
1109 case ixgbe_sfp_type_da_cu: 1208 /* SFP check must be done last since DA modules are sometimes used to
1110 case ixgbe_sfp_type_da_cu_core0: 1209 * test KR mode - we need to id KR mode correctly before SFP module.
1111 case ixgbe_sfp_type_da_cu_core1: 1210 * Call identify_sfp because the pluggable module may have changed */
1112 physical_layer = IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU; 1211 hw->phy.ops.identify_sfp(hw);
1113 break; 1212 if (hw->phy.sfp_type == ixgbe_sfp_type_not_present)
1114 case ixgbe_sfp_type_sr: 1213 goto out;
1214
1215 switch (hw->phy.type) {
1216 case ixgbe_phy_tw_tyco:
1217 case ixgbe_phy_tw_unknown:
1218 physical_layer = IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU;
1219 break;
1220 case ixgbe_phy_sfp_avago:
1221 case ixgbe_phy_sfp_ftl:
1222 case ixgbe_phy_sfp_intel:
1223 case ixgbe_phy_sfp_unknown:
1224 hw->phy.ops.read_i2c_eeprom(hw,
1225 IXGBE_SFF_10GBE_COMP_CODES, &comp_codes_10g);
1226 if (comp_codes_10g & IXGBE_SFF_10GBASESR_CAPABLE)
1115 physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_SR; 1227 physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_SR;
1116 break; 1228 else if (comp_codes_10g & IXGBE_SFF_10GBASELR_CAPABLE)
1117 case ixgbe_sfp_type_lr:
1118 physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_LR; 1229 physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_LR;
1119 break;
1120 case ixgbe_sfp_type_srlr_core0:
1121 case ixgbe_sfp_type_srlr_core1:
1122 hw->phy.ops.read_i2c_eeprom(hw,
1123 IXGBE_SFF_10GBE_COMP_CODES,
1124 &comp_codes_10g);
1125 if (comp_codes_10g & IXGBE_SFF_10GBASESR_CAPABLE)
1126 physical_layer =
1127 IXGBE_PHYSICAL_LAYER_10GBASE_SR;
1128 else if (comp_codes_10g & IXGBE_SFF_10GBASELR_CAPABLE)
1129 physical_layer =
1130 IXGBE_PHYSICAL_LAYER_10GBASE_LR;
1131 else
1132 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
1133 default:
1134 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
1135 break;
1136 }
1137 break; 1230 break;
1138 default: 1231 default:
1139 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
1140 break; 1232 break;
1141 } 1233 }
1142 1234
1235out:
1143 return physical_layer; 1236 return physical_layer;
1144} 1237}
1145 1238
@@ -1187,6 +1280,22 @@ s32 ixgbe_enable_rx_dma_82599(struct ixgbe_hw *hw, u32 regval)
1187 return 0; 1280 return 0;
1188} 1281}
1189 1282
1283/**
1284 * ixgbe_get_device_caps_82599 - Get additional device capabilities
1285 * @hw: pointer to hardware structure
1286 * @device_caps: the EEPROM word with the extra device capabilities
1287 *
1288 * This function will read the EEPROM location for the device capabilities,
1289 * and return the word through device_caps.
1290 **/
1291s32 ixgbe_get_device_caps_82599(struct ixgbe_hw *hw, u16 *device_caps)
1292{
1293 hw->eeprom.ops.read(hw, IXGBE_DEVICE_CAPS, device_caps);
1294
1295 return 0;
1296}
1297
1298
1190static struct ixgbe_mac_operations mac_ops_82599 = { 1299static struct ixgbe_mac_operations mac_ops_82599 = {
1191 .init_hw = &ixgbe_init_hw_generic, 1300 .init_hw = &ixgbe_init_hw_generic,
1192 .reset_hw = &ixgbe_reset_hw_82599, 1301 .reset_hw = &ixgbe_reset_hw_82599,
@@ -1196,6 +1305,7 @@ static struct ixgbe_mac_operations mac_ops_82599 = {
1196 .get_supported_physical_layer = &ixgbe_get_supported_physical_layer_82599, 1305 .get_supported_physical_layer = &ixgbe_get_supported_physical_layer_82599,
1197 .enable_rx_dma = &ixgbe_enable_rx_dma_82599, 1306 .enable_rx_dma = &ixgbe_enable_rx_dma_82599,
1198 .get_mac_addr = &ixgbe_get_mac_addr_generic, 1307 .get_mac_addr = &ixgbe_get_mac_addr_generic,
1308 .get_device_caps = &ixgbe_get_device_caps_82599,
1199 .stop_adapter = &ixgbe_stop_adapter_generic, 1309 .stop_adapter = &ixgbe_stop_adapter_generic,
1200 .get_bus_info = &ixgbe_get_bus_info_generic, 1310 .get_bus_info = &ixgbe_get_bus_info_generic,
1201 .set_lan_id = &ixgbe_set_lan_id_multi_port_pcie, 1311 .set_lan_id = &ixgbe_set_lan_id_multi_port_pcie,
@@ -1236,6 +1346,7 @@ static struct ixgbe_eeprom_operations eeprom_ops_82599 = {
1236static struct ixgbe_phy_operations phy_ops_82599 = { 1346static struct ixgbe_phy_operations phy_ops_82599 = {
1237 .identify = &ixgbe_identify_phy_82599, 1347 .identify = &ixgbe_identify_phy_82599,
1238 .identify_sfp = &ixgbe_identify_sfp_module_generic, 1348 .identify_sfp = &ixgbe_identify_sfp_module_generic,
1349 .init = &ixgbe_init_phy_ops_82599,
1239 .reset = &ixgbe_reset_phy_generic, 1350 .reset = &ixgbe_reset_phy_generic,
1240 .read_reg = &ixgbe_read_phy_reg_generic, 1351 .read_reg = &ixgbe_read_phy_reg_generic,
1241 .write_reg = &ixgbe_write_phy_reg_generic, 1352 .write_reg = &ixgbe_write_phy_reg_generic,
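
On the 82599 the reset path now drives all of the PHY/SFP work itself: it initializes the PHY ops first, performs the SFP setup only when identify_sfp flagged a changed module (the sfp_setup_needed flag introduced in the ixgbe_phy.c and ixgbe_type.h hunks further down), and resets the PHY only if that is both allowed and still wired up. Stripped of the MAC-reset and AUTOC-restore details, the flow is roughly:

static s32 foo_reset_hw_82599(struct ixgbe_hw *hw)
{
        s32 status;

        hw->mac.ops.stop_adapter(hw);

        /* PHY ops must be identified and initialized prior to reset */
        status = hw->phy.ops.init(hw);          /* ixgbe_init_phy_ops_82599() */
        if (status == IXGBE_ERR_SFP_NOT_SUPPORTED)
                return status;

        /* run the SFP setup only if identify_sfp saw a new or changed module */
        if (hw->phy.sfp_setup_needed) {
                status = hw->mac.ops.setup_sfp(hw);
                hw->phy.sfp_setup_needed = false;
        }

        /* setup_sfp_modules_82599() may have cleared the reset op, hence the NULL check */
        if (!hw->phy.reset_disable && hw->phy.ops.reset)
                hw->phy.ops.reset(hw);

        /* ... disable PCI-E master access, issue the MAC reset, restore AUTOC ... */
        return status;
}
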
diff --git a/drivers/net/ixgbe/ixgbe_common.c b/drivers/net/ixgbe/ixgbe_common.c
index 5567519676d5..5f2ee34e9d1d 100644
--- a/drivers/net/ixgbe/ixgbe_common.c
+++ b/drivers/net/ixgbe/ixgbe_common.c
@@ -1700,6 +1700,7 @@ s32 ixgbe_fc_enable(struct ixgbe_hw *hw, s32 packetbuf_num)
1700 } 1700 }
1701 1701
1702 /* Enable 802.3x based flow control settings. */ 1702 /* Enable 802.3x based flow control settings. */
1703 mflcn_reg |= IXGBE_MFLCN_DPF;
1703 IXGBE_WRITE_REG(hw, IXGBE_MFLCN, mflcn_reg); 1704 IXGBE_WRITE_REG(hw, IXGBE_MFLCN, mflcn_reg);
1704 IXGBE_WRITE_REG(hw, IXGBE_FCCFG, fccfg_reg); 1705 IXGBE_WRITE_REG(hw, IXGBE_FCCFG, fccfg_reg);
1705 1706
@@ -1906,9 +1907,11 @@ s32 ixgbe_setup_fc_generic(struct ixgbe_hw *hw, s32 packetbuf_num)
1906 * because it causes the controller to just blast out fc packets. 1907 * because it causes the controller to just blast out fc packets.
1907 */ 1908 */
1908 if (!hw->fc.low_water || !hw->fc.high_water || !hw->fc.pause_time) { 1909 if (!hw->fc.low_water || !hw->fc.high_water || !hw->fc.pause_time) {
1909 hw_dbg(hw, "Invalid water mark configuration\n"); 1910 if (hw->fc.requested_mode != ixgbe_fc_none) {
1910 ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS; 1911 hw_dbg(hw, "Invalid water mark configuration\n");
1911 goto out; 1912 ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
1913 goto out;
1914 }
1912 } 1915 }
1913 1916
1914 /* 1917 /*
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index 01884256f4c9..c45e4e7999ea 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -47,7 +47,7 @@ char ixgbe_driver_name[] = "ixgbe";
47static const char ixgbe_driver_string[] = 47static const char ixgbe_driver_string[] =
48 "Intel(R) 10 Gigabit PCI Express Network Driver"; 48 "Intel(R) 10 Gigabit PCI Express Network Driver";
49 49
50#define DRV_VERSION "2.0.8-k2" 50#define DRV_VERSION "2.0.16-k2"
51const char ixgbe_driver_version[] = DRV_VERSION; 51const char ixgbe_driver_version[] = DRV_VERSION;
52static char ixgbe_copyright[] = "Copyright (c) 1999-2009 Intel Corporation."; 52static char ixgbe_copyright[] = "Copyright (c) 1999-2009 Intel Corporation.";
53 53
@@ -3503,6 +3503,8 @@ static int ixgbe_open(struct net_device *netdev)
3503 if (test_bit(__IXGBE_TESTING, &adapter->state)) 3503 if (test_bit(__IXGBE_TESTING, &adapter->state))
3504 return -EBUSY; 3504 return -EBUSY;
3505 3505
3506 netif_carrier_off(netdev);
3507
3506 /* allocate transmit descriptors */ 3508 /* allocate transmit descriptors */
3507 err = ixgbe_setup_all_tx_resources(adapter); 3509 err = ixgbe_setup_all_tx_resources(adapter);
3508 if (err) 3510 if (err)
@@ -4703,7 +4705,11 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
4703 4705
4704 /* reset_hw fills in the perm_addr as well */ 4706 /* reset_hw fills in the perm_addr as well */
4705 err = hw->mac.ops.reset_hw(hw); 4707 err = hw->mac.ops.reset_hw(hw);
4706 if (err) { 4708 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
4709 dev_err(&adapter->pdev->dev, "failed to load because an "
4710 "unsupported SFP+ module type was detected.\n");
4711 goto err_sw_init;
4712 } else if (err) {
4707 dev_err(&adapter->pdev->dev, "HW Init failed: %d\n", err); 4713 dev_err(&adapter->pdev->dev, "HW Init failed: %d\n", err);
4708 goto err_sw_init; 4714 goto err_sw_init;
4709 } 4715 }
@@ -4776,6 +4782,9 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
4776 device_init_wakeup(&adapter->pdev->dev, true); 4782 device_init_wakeup(&adapter->pdev->dev, true);
4777 device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol); 4783 device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
4778 4784
4785 /* pick up the PCI bus settings for reporting later */
4786 hw->mac.ops.get_bus_info(hw);
4787
4779 /* print bus type/speed/width info */ 4788 /* print bus type/speed/width info */
4780 dev_info(&pdev->dev, "(PCI Express:%s:%s) %pM\n", 4789 dev_info(&pdev->dev, "(PCI Express:%s:%s) %pM\n",
4781 ((hw->bus.speed == ixgbe_bus_speed_5000) ? "5.0Gb/s": 4790 ((hw->bus.speed == ixgbe_bus_speed_5000) ? "5.0Gb/s":
@@ -4809,13 +4818,14 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
4809 /* reset the hardware with the new settings */ 4818 /* reset the hardware with the new settings */
4810 hw->mac.ops.start_hw(hw); 4819 hw->mac.ops.start_hw(hw);
4811 4820
4812 netif_carrier_off(netdev);
4813
4814 strcpy(netdev->name, "eth%d"); 4821 strcpy(netdev->name, "eth%d");
4815 err = register_netdev(netdev); 4822 err = register_netdev(netdev);
4816 if (err) 4823 if (err)
4817 goto err_register; 4824 goto err_register;
4818 4825
4826 /* carrier off reporting is important to ethtool even BEFORE open */
4827 netif_carrier_off(netdev);
4828
4819#ifdef CONFIG_IXGBE_DCA 4829#ifdef CONFIG_IXGBE_DCA
4820 if (dca_add_requester(&pdev->dev) == 0) { 4830 if (dca_add_requester(&pdev->dev) == 0) {
4821 adapter->flags |= IXGBE_FLAG_DCA_ENABLED; 4831 adapter->flags |= IXGBE_FLAG_DCA_ENABLED;
diff --git a/drivers/net/ixgbe/ixgbe_phy.c b/drivers/net/ixgbe/ixgbe_phy.c
index 14e9606aa3b3..f3258ec901fe 100644
--- a/drivers/net/ixgbe/ixgbe_phy.c
+++ b/drivers/net/ixgbe/ixgbe_phy.c
@@ -552,6 +552,7 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
552{ 552{
553 s32 status = IXGBE_ERR_PHY_ADDR_INVALID; 553 s32 status = IXGBE_ERR_PHY_ADDR_INVALID;
554 u32 vendor_oui = 0; 554 u32 vendor_oui = 0;
555 enum ixgbe_sfp_type stored_sfp_type = hw->phy.sfp_type;
555 u8 identifier = 0; 556 u8 identifier = 0;
556 u8 comp_codes_1g = 0; 557 u8 comp_codes_1g = 0;
557 u8 comp_codes_10g = 0; 558 u8 comp_codes_10g = 0;
@@ -620,8 +621,18 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
620 hw->phy.sfp_type = ixgbe_sfp_type_unknown; 621 hw->phy.sfp_type = ixgbe_sfp_type_unknown;
621 } 622 }
622 623
624 if (hw->phy.sfp_type != stored_sfp_type)
625 hw->phy.sfp_setup_needed = true;
626
627 /* Determine if the SFP+ PHY is dual speed or not. */
628 if (((comp_codes_1g & IXGBE_SFF_1GBASESX_CAPABLE) &&
629 (comp_codes_10g & IXGBE_SFF_10GBASESR_CAPABLE)) ||
630 ((comp_codes_1g & IXGBE_SFF_1GBASELX_CAPABLE) &&
631 (comp_codes_10g & IXGBE_SFF_10GBASELR_CAPABLE)))
632 hw->phy.multispeed_fiber = true;
633
623 /* Determine PHY vendor */ 634 /* Determine PHY vendor */
624 if (hw->phy.type == ixgbe_phy_unknown) { 635 if (hw->phy.type != ixgbe_phy_nl) {
625 hw->phy.id = identifier; 636 hw->phy.id = identifier;
626 hw->phy.ops.read_i2c_eeprom(hw, 637 hw->phy.ops.read_i2c_eeprom(hw,
627 IXGBE_SFF_VENDOR_OUI_BYTE0, 638 IXGBE_SFF_VENDOR_OUI_BYTE0,
@@ -671,9 +682,9 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
671 goto out; 682 goto out;
672 } 683 }
673 684
674 hw->eeprom.ops.read(hw, IXGBE_PHY_ENFORCE_INTEL_SFP_OFFSET, 685 /* This is guaranteed to be 82599, no need to check for NULL */
675 &enforce_sfp); 686 hw->mac.ops.get_device_caps(hw, &enforce_sfp);
676 if (!(enforce_sfp & IXGBE_PHY_ALLOW_ANY_SFP)) { 687 if (!(enforce_sfp & IXGBE_DEVICE_CAPS_ALLOW_ANY_SFP)) {
677 /* Make sure we're a supported PHY type */ 688 /* Make sure we're a supported PHY type */
678 if (hw->phy.type == ixgbe_phy_sfp_intel) { 689 if (hw->phy.type == ixgbe_phy_sfp_intel) {
679 status = 0; 690 status = 0;
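
identify_sfp now records whether the module type changed since the last call (setting sfp_setup_needed, which the 82599 reset path above consumes), flags dual-speed SR/LR optics as multispeed fiber, and fetches the "allow any SFP" policy through the MAC's new get_device_caps hook instead of a hard-coded EEPROM offset. How the pieces from this file, ixgbe_82599.c and ixgbe_type.h fit together, condensed, with the rejection branch summarized in a comment:

u16 enforce_sfp = 0;

/* EEPROM word 0x2C (IXGBE_DEVICE_CAPS) is read through the MAC op;
 * this code only runs on 82599, which is the part providing the hook */
hw->mac.ops.get_device_caps(hw, &enforce_sfp);
if (!(enforce_sfp & IXGBE_DEVICE_CAPS_ALLOW_ANY_SFP)) {
        if (hw->phy.type == ixgbe_phy_sfp_intel)
                status = 0;
        /* else: the module is reported as unsupported (IXGBE_ERR_SFP_NOT_SUPPORTED) */
}
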
diff --git a/drivers/net/ixgbe/ixgbe_phy.h b/drivers/net/ixgbe/ixgbe_phy.h
index cc5f1b3287e1..c9964b7ce1b9 100644
--- a/drivers/net/ixgbe/ixgbe_phy.h
+++ b/drivers/net/ixgbe/ixgbe_phy.h
@@ -44,6 +44,7 @@
44/* Bitmasks */ 44/* Bitmasks */
45#define IXGBE_SFF_TWIN_AX_CAPABLE 0x80 45#define IXGBE_SFF_TWIN_AX_CAPABLE 0x80
46#define IXGBE_SFF_1GBASESX_CAPABLE 0x1 46#define IXGBE_SFF_1GBASESX_CAPABLE 0x1
47#define IXGBE_SFF_1GBASELX_CAPABLE 0x2
47#define IXGBE_SFF_10GBASESR_CAPABLE 0x10 48#define IXGBE_SFF_10GBASESR_CAPABLE 0x10
48#define IXGBE_SFF_10GBASELR_CAPABLE 0x20 49#define IXGBE_SFF_10GBASELR_CAPABLE 0x20
49#define IXGBE_I2C_EEPROM_READ_MASK 0x100 50#define IXGBE_I2C_EEPROM_READ_MASK 0x100
diff --git a/drivers/net/ixgbe/ixgbe_type.h b/drivers/net/ixgbe/ixgbe_type.h
index 030ff0a9ea67..a3317d8fbf6a 100644
--- a/drivers/net/ixgbe/ixgbe_type.h
+++ b/drivers/net/ixgbe/ixgbe_type.h
@@ -862,6 +862,7 @@
862#define IXGBE_MDIO_PHY_EXT_ABILITY 0xB /* Ext Ability Reg */ 862#define IXGBE_MDIO_PHY_EXT_ABILITY 0xB /* Ext Ability Reg */
863#define IXGBE_MDIO_PHY_10GBASET_ABILITY 0x0004 /* 10GBaseT capable */ 863#define IXGBE_MDIO_PHY_10GBASET_ABILITY 0x0004 /* 10GBaseT capable */
864#define IXGBE_MDIO_PHY_1000BASET_ABILITY 0x0020 /* 1000BaseT capable */ 864#define IXGBE_MDIO_PHY_1000BASET_ABILITY 0x0020 /* 1000BaseT capable */
865#define IXGBE_MDIO_PHY_100BASETX_ABILITY 0x0080 /* 100BaseTX capable */
865 866
866#define IXGBE_MDIO_PMA_PMD_SDA_SCL_ADDR 0xC30A /* PHY_XS SDA/SCL Addr Reg */ 867#define IXGBE_MDIO_PMA_PMD_SDA_SCL_ADDR 0xC30A /* PHY_XS SDA/SCL Addr Reg */
867#define IXGBE_MDIO_PMA_PMD_SDA_SCL_DATA 0xC30B /* PHY_XS SDA/SCL Data Reg */ 868#define IXGBE_MDIO_PMA_PMD_SDA_SCL_DATA 0xC30B /* PHY_XS SDA/SCL Data Reg */
@@ -898,8 +899,6 @@
898#define IXGBE_CONTROL_NL 0x000F 899#define IXGBE_CONTROL_NL 0x000F
899#define IXGBE_CONTROL_EOL_NL 0x0FFF 900#define IXGBE_CONTROL_EOL_NL 0x0FFF
900#define IXGBE_CONTROL_SOL_NL 0x0000 901#define IXGBE_CONTROL_SOL_NL 0x0000
901#define IXGBE_PHY_ENFORCE_INTEL_SFP_OFFSET 0x002C
902#define IXGBE_PHY_ALLOW_ANY_SFP 0x1
903 902
904/* General purpose Interrupt Enable */ 903/* General purpose Interrupt Enable */
905#define IXGBE_SDP0_GPIEN 0x00000001 /* SDP0 */ 904#define IXGBE_SDP0_GPIEN 0x00000001 /* SDP0 */
@@ -958,6 +957,8 @@
958#define IXGBE_VT_CTL_DIS_DEFPL 0x20000000 /* disable default pool */ 957#define IXGBE_VT_CTL_DIS_DEFPL 0x20000000 /* disable default pool */
959#define IXGBE_VT_CTL_REPLEN 0x40000000 /* replication enabled */ 958#define IXGBE_VT_CTL_REPLEN 0x40000000 /* replication enabled */
960#define IXGBE_VT_CTL_VT_ENABLE 0x00000001 /* Enable VT Mode */ 959#define IXGBE_VT_CTL_VT_ENABLE 0x00000001 /* Enable VT Mode */
960#define IXGBE_VT_CTL_POOL_SHIFT 7
961#define IXGBE_VT_CTL_POOL_MASK (0x3F << IXGBE_VT_CTL_POOL_SHIFT)
961 962
962/* VMOLR bitmasks */ 963/* VMOLR bitmasks */
963#define IXGBE_VMOLR_AUPE 0x01000000 /* accept untagged packets */ 964#define IXGBE_VMOLR_AUPE 0x01000000 /* accept untagged packets */
@@ -1148,6 +1149,7 @@
1148 1149
1149/* Interrupt Vector Allocation Registers */ 1150/* Interrupt Vector Allocation Registers */
1150#define IXGBE_IVAR_REG_NUM 25 1151#define IXGBE_IVAR_REG_NUM 25
1152#define IXGBE_IVAR_REG_NUM_82599 64
1151#define IXGBE_IVAR_TXRX_ENTRY 96 1153#define IXGBE_IVAR_TXRX_ENTRY 96
1152#define IXGBE_IVAR_RX_ENTRY 64 1154#define IXGBE_IVAR_RX_ENTRY 64
1153#define IXGBE_IVAR_RX_QUEUE(_i) (0 + (_i)) 1155#define IXGBE_IVAR_RX_QUEUE(_i) (0 + (_i))
@@ -1382,6 +1384,7 @@
1382#define IXGBE_FW_PTR 0x0F 1384#define IXGBE_FW_PTR 0x0F
1383#define IXGBE_PBANUM0_PTR 0x15 1385#define IXGBE_PBANUM0_PTR 0x15
1384#define IXGBE_PBANUM1_PTR 0x16 1386#define IXGBE_PBANUM1_PTR 0x16
1387#define IXGBE_DEVICE_CAPS 0x2C
1385#define IXGBE_PCIE_MSIX_82599_CAPS 0x72 1388#define IXGBE_PCIE_MSIX_82599_CAPS 0x72
1386#define IXGBE_PCIE_MSIX_82598_CAPS 0x62 1389#define IXGBE_PCIE_MSIX_82598_CAPS 0x62
1387 1390
@@ -1425,6 +1428,8 @@
1425#define IXGBE_EERD_ATTEMPTS 100000 1428#define IXGBE_EERD_ATTEMPTS 100000
1426#endif 1429#endif
1427 1430
1431#define IXGBE_DEVICE_CAPS_ALLOW_ANY_SFP 0x1
1432
1428/* PCI Bus Info */ 1433/* PCI Bus Info */
1429#define IXGBE_PCI_LINK_STATUS 0xB2 1434#define IXGBE_PCI_LINK_STATUS 0xB2
1430#define IXGBE_PCI_LINK_WIDTH 0x3F0 1435#define IXGBE_PCI_LINK_WIDTH 0x3F0
@@ -1553,7 +1558,8 @@
1553#define IXGBE_MTQC_RT_ENA 0x1 /* DCB Enable */ 1558#define IXGBE_MTQC_RT_ENA 0x1 /* DCB Enable */
1554#define IXGBE_MTQC_VT_ENA 0x2 /* VMDQ2 Enable */ 1559#define IXGBE_MTQC_VT_ENA 0x2 /* VMDQ2 Enable */
1555#define IXGBE_MTQC_64Q_1PB 0x0 /* 64 queues 1 pack buffer */ 1560#define IXGBE_MTQC_64Q_1PB 0x0 /* 64 queues 1 pack buffer */
1556#define IXGBE_MTQC_64VF 0x8 /* 2 TX Queues per pool w/64VF's */ 1561#define IXGBE_MTQC_32VF 0x8 /* 4 TX Queues per pool w/32VF's */
1562#define IXGBE_MTQC_64VF 0x4 /* 2 TX Queues per pool w/64VF's */
1557#define IXGBE_MTQC_8TC_8TQ 0xC /* 8 TC if RT_ENA or 8 TQ if VT_ENA */ 1563#define IXGBE_MTQC_8TC_8TQ 0xC /* 8 TC if RT_ENA or 8 TQ if VT_ENA */
1558 1564
1559/* Receive Descriptor bit definitions */ 1565/* Receive Descriptor bit definitions */
@@ -1861,7 +1867,7 @@ typedef u32 ixgbe_physical_layer;
1861#define IXGBE_PHYSICAL_LAYER_UNKNOWN 0 1867#define IXGBE_PHYSICAL_LAYER_UNKNOWN 0
1862#define IXGBE_PHYSICAL_LAYER_10GBASE_T 0x0001 1868#define IXGBE_PHYSICAL_LAYER_10GBASE_T 0x0001
1863#define IXGBE_PHYSICAL_LAYER_1000BASE_T 0x0002 1869#define IXGBE_PHYSICAL_LAYER_1000BASE_T 0x0002
1864#define IXGBE_PHYSICAL_LAYER_100BASE_T 0x0004 1870#define IXGBE_PHYSICAL_LAYER_100BASE_TX 0x0004
1865#define IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU 0x0008 1871#define IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU 0x0008
1866#define IXGBE_PHYSICAL_LAYER_10GBASE_LR 0x0010 1872#define IXGBE_PHYSICAL_LAYER_10GBASE_LR 0x0010
1867#define IXGBE_PHYSICAL_LAYER_10GBASE_LRM 0x0020 1873#define IXGBE_PHYSICAL_LAYER_10GBASE_LRM 0x0020
@@ -1870,6 +1876,7 @@ typedef u32 ixgbe_physical_layer;
1870#define IXGBE_PHYSICAL_LAYER_10GBASE_CX4 0x0100 1876#define IXGBE_PHYSICAL_LAYER_10GBASE_CX4 0x0100
1871#define IXGBE_PHYSICAL_LAYER_1000BASE_KX 0x0200 1877#define IXGBE_PHYSICAL_LAYER_1000BASE_KX 0x0200
1872#define IXGBE_PHYSICAL_LAYER_1000BASE_BX 0x0400 1878#define IXGBE_PHYSICAL_LAYER_1000BASE_BX 0x0400
1879#define IXGBE_PHYSICAL_LAYER_10GBASE_KR 0x0800
1873 1880
1874enum ixgbe_eeprom_type { 1881enum ixgbe_eeprom_type {
1875 ixgbe_eeprom_uninitialized = 0, 1882 ixgbe_eeprom_uninitialized = 0,
@@ -2101,6 +2108,7 @@ struct ixgbe_mac_operations {
2101 enum ixgbe_media_type (*get_media_type)(struct ixgbe_hw *); 2108 enum ixgbe_media_type (*get_media_type)(struct ixgbe_hw *);
2102 u32 (*get_supported_physical_layer)(struct ixgbe_hw *); 2109 u32 (*get_supported_physical_layer)(struct ixgbe_hw *);
2103 s32 (*get_mac_addr)(struct ixgbe_hw *, u8 *); 2110 s32 (*get_mac_addr)(struct ixgbe_hw *, u8 *);
2111 s32 (*get_device_caps)(struct ixgbe_hw *, u16 *);
2104 s32 (*stop_adapter)(struct ixgbe_hw *); 2112 s32 (*stop_adapter)(struct ixgbe_hw *);
2105 s32 (*get_bus_info)(struct ixgbe_hw *); 2113 s32 (*get_bus_info)(struct ixgbe_hw *);
2106 void (*set_lan_id)(struct ixgbe_hw *); 2114 void (*set_lan_id)(struct ixgbe_hw *);
@@ -2146,6 +2154,7 @@ struct ixgbe_mac_operations {
2146struct ixgbe_phy_operations { 2154struct ixgbe_phy_operations {
2147 s32 (*identify)(struct ixgbe_hw *); 2155 s32 (*identify)(struct ixgbe_hw *);
2148 s32 (*identify_sfp)(struct ixgbe_hw *); 2156 s32 (*identify_sfp)(struct ixgbe_hw *);
2157 s32 (*init)(struct ixgbe_hw *);
2149 s32 (*reset)(struct ixgbe_hw *); 2158 s32 (*reset)(struct ixgbe_hw *);
2150 s32 (*read_reg)(struct ixgbe_hw *, u32, u32, u16 *); 2159 s32 (*read_reg)(struct ixgbe_hw *, u32, u32, u16 *);
2151 s32 (*write_reg)(struct ixgbe_hw *, u32, u32, u16); 2160 s32 (*write_reg)(struct ixgbe_hw *, u32, u32, u16);
@@ -2193,6 +2202,7 @@ struct ixgbe_phy_info {
2193 u32 addr; 2202 u32 addr;
2194 u32 id; 2203 u32 id;
2195 enum ixgbe_sfp_type sfp_type; 2204 enum ixgbe_sfp_type sfp_type;
2205 bool sfp_setup_needed;
2196 u32 revision; 2206 u32 revision;
2197 enum ixgbe_media_type media_type; 2207 enum ixgbe_media_type media_type;
2198 bool reset_disable; 2208 bool reset_disable;
diff --git a/drivers/net/ixp2000/ixpdev.c b/drivers/net/ixp2000/ixpdev.c
index d3bf2f017cc2..2a0174b62e96 100644
--- a/drivers/net/ixp2000/ixpdev.c
+++ b/drivers/net/ixp2000/ixpdev.c
@@ -270,6 +270,18 @@ static int ixpdev_close(struct net_device *dev)
270 return 0; 270 return 0;
271} 271}
272 272
273static const struct net_device_ops ixpdev_netdev_ops = {
274 .ndo_open = ixpdev_open,
275 .ndo_stop = ixpdev_close,
276 .ndo_start_xmit = ixpdev_xmit,
277 .ndo_change_mtu = eth_change_mtu,
278 .ndo_validate_addr = eth_validate_addr,
279 .ndo_set_mac_address = eth_mac_addr,
280#ifdef CONFIG_NET_POLL_CONTROLLER
281 .ndo_poll_controller = ixpdev_poll_controller,
282#endif
283};
284
273struct net_device *ixpdev_alloc(int channel, int sizeof_priv) 285struct net_device *ixpdev_alloc(int channel, int sizeof_priv)
274{ 286{
275 struct net_device *dev; 287 struct net_device *dev;
@@ -279,12 +291,7 @@ struct net_device *ixpdev_alloc(int channel, int sizeof_priv)
279 if (dev == NULL) 291 if (dev == NULL)
280 return NULL; 292 return NULL;
281 293
282 dev->hard_start_xmit = ixpdev_xmit; 294 dev->netdev_ops = &ixpdev_netdev_ops;
283 dev->open = ixpdev_open;
284 dev->stop = ixpdev_close;
285#ifdef CONFIG_NET_POLL_CONTROLLER
286 dev->poll_controller = ixpdev_poll_controller;
287#endif
288 295
289 dev->features |= NETIF_F_SG | NETIF_F_HW_CSUM; 296 dev->features |= NETIF_F_SG | NETIF_F_HW_CSUM;
290 297
diff --git a/drivers/net/jazzsonic.c b/drivers/net/jazzsonic.c
index 14248cfc3dfd..d12106b47bf2 100644
--- a/drivers/net/jazzsonic.c
+++ b/drivers/net/jazzsonic.c
@@ -96,6 +96,18 @@ static int jazzsonic_close(struct net_device* dev)
96 return err; 96 return err;
97} 97}
98 98
99static const struct net_device_ops sonic_netdev_ops = {
100 .ndo_open = jazzsonic_open,
101 .ndo_stop = jazzsonic_close,
102 .ndo_start_xmit = sonic_send_packet,
103 .ndo_get_stats = sonic_get_stats,
104 .ndo_set_multicast_list = sonic_multicast_list,
105 .ndo_tx_timeout = sonic_tx_timeout,
106 .ndo_change_mtu = eth_change_mtu,
107 .ndo_validate_addr = eth_validate_addr,
108 .ndo_set_mac_address = eth_mac_addr,
109};
110
99static int __init sonic_probe1(struct net_device *dev) 111static int __init sonic_probe1(struct net_device *dev)
100{ 112{
101 static unsigned version_printed; 113 static unsigned version_printed;
@@ -179,12 +191,7 @@ static int __init sonic_probe1(struct net_device *dev)
179 lp->rra_laddr = lp->rda_laddr + (SIZEOF_SONIC_RD * SONIC_NUM_RDS 191 lp->rra_laddr = lp->rda_laddr + (SIZEOF_SONIC_RD * SONIC_NUM_RDS
180 * SONIC_BUS_SCALE(lp->dma_bitmode)); 192 * SONIC_BUS_SCALE(lp->dma_bitmode));
181 193
182 dev->open = jazzsonic_open; 194 dev->netdev_ops = &sonic_netdev_ops;
183 dev->stop = jazzsonic_close;
184 dev->hard_start_xmit = sonic_send_packet;
185 dev->get_stats = sonic_get_stats;
186 dev->set_multicast_list = &sonic_multicast_list;
187 dev->tx_timeout = sonic_tx_timeout;
188 dev->watchdog_timeo = TX_TIMEOUT; 195 dev->watchdog_timeo = TX_TIMEOUT;
189 196
190 /* 197 /*
diff --git a/drivers/net/korina.c b/drivers/net/korina.c
index 38d6649a29c4..dc238567cae1 100644
--- a/drivers/net/korina.c
+++ b/drivers/net/korina.c
@@ -1081,6 +1081,21 @@ static int korina_close(struct net_device *dev)
1081 return 0; 1081 return 0;
1082} 1082}
1083 1083
1084static const struct net_device_ops korina_netdev_ops = {
1085 .ndo_open = korina_open,
1086 .ndo_stop = korina_close,
1087 .ndo_start_xmit = korina_send_packet,
1088 .ndo_set_multicast_list = korina_multicast_list,
1089 .ndo_tx_timeout = korina_tx_timeout,
1090 .ndo_do_ioctl = korina_ioctl,
1091 .ndo_change_mtu = eth_change_mtu,
1092 .ndo_validate_addr = eth_validate_addr,
1093 .ndo_set_mac_address = eth_mac_addr,
1094#ifdef CONFIG_NET_POLL_CONTROLLER
1095 .ndo_poll_controller = korina_poll_controller,
1096#endif
1097};
1098
1084static int korina_probe(struct platform_device *pdev) 1099static int korina_probe(struct platform_device *pdev)
1085{ 1100{
1086 struct korina_device *bif = platform_get_drvdata(pdev); 1101 struct korina_device *bif = platform_get_drvdata(pdev);
@@ -1149,17 +1164,9 @@ static int korina_probe(struct platform_device *pdev)
1149 dev->irq = lp->rx_irq; 1164 dev->irq = lp->rx_irq;
1150 lp->dev = dev; 1165 lp->dev = dev;
1151 1166
1152 dev->open = korina_open; 1167 dev->netdev_ops = &korina_netdev_ops;
1153 dev->stop = korina_close;
1154 dev->hard_start_xmit = korina_send_packet;
1155 dev->set_multicast_list = &korina_multicast_list;
1156 dev->ethtool_ops = &netdev_ethtool_ops; 1168 dev->ethtool_ops = &netdev_ethtool_ops;
1157 dev->tx_timeout = korina_tx_timeout;
1158 dev->watchdog_timeo = TX_TIMEOUT; 1169 dev->watchdog_timeo = TX_TIMEOUT;
1159 dev->do_ioctl = &korina_ioctl;
1160#ifdef CONFIG_NET_POLL_CONTROLLER
1161 dev->poll_controller = korina_poll_controller;
1162#endif
1163 netif_napi_add(dev, &lp->napi, korina_poll, 64); 1170 netif_napi_add(dev, &lp->napi, korina_poll, 64);
1164 1171
1165 lp->phy_addr = (((lp->rx_irq == 0x2c? 1:0) << 8) | 0x05); 1172 lp->phy_addr = (((lp->rx_irq == 0x2c? 1:0) << 8) | 0x05);
diff --git a/drivers/net/lib82596.c b/drivers/net/lib82596.c
index 7415f517491d..070fa4500871 100644
--- a/drivers/net/lib82596.c
+++ b/drivers/net/lib82596.c
@@ -1036,6 +1036,19 @@ static void print_eth(unsigned char *add, char *str)
1036 printk(KERN_DEBUG "i596 0x%p, %pM --> %pM %02X%02X, %s\n", 1036 printk(KERN_DEBUG "i596 0x%p, %pM --> %pM %02X%02X, %s\n",
1037 add, add + 6, add, add[12], add[13], str); 1037 add, add + 6, add, add[12], add[13], str);
1038} 1038}
1039static const struct net_device_ops i596_netdev_ops = {
1040 .ndo_open = i596_open,
1041 .ndo_stop = i596_close,
1042 .ndo_start_xmit = i596_start_xmit,
1043 .ndo_set_multicast_list = set_multicast_list,
1044 .ndo_tx_timeout = i596_tx_timeout,
1045 .ndo_change_mtu = eth_change_mtu,
1046 .ndo_validate_addr = eth_validate_addr,
1047 .ndo_set_mac_address = eth_mac_addr,
1048#ifdef CONFIG_NET_POLL_CONTROLLER
1049 .ndo_poll_controller = i596_poll_controller,
1050#endif
1051};
1039 1052
1040static int __devinit i82596_probe(struct net_device *dev) 1053static int __devinit i82596_probe(struct net_device *dev)
1041{ 1054{
@@ -1062,16 +1075,8 @@ static int __devinit i82596_probe(struct net_device *dev)
1062 return -ENOMEM; 1075 return -ENOMEM;
1063 } 1076 }
1064 1077
1065 /* The 82596-specific entries in the device structure. */ 1078 dev->netdev_ops = &i596_netdev_ops;
1066 dev->open = i596_open;
1067 dev->stop = i596_close;
1068 dev->hard_start_xmit = i596_start_xmit;
1069 dev->set_multicast_list = set_multicast_list;
1070 dev->tx_timeout = i596_tx_timeout;
1071 dev->watchdog_timeo = TX_TIMEOUT; 1079 dev->watchdog_timeo = TX_TIMEOUT;
1072#ifdef CONFIG_NET_POLL_CONTROLLER
1073 dev->poll_controller = i596_poll_controller;
1074#endif
1075 1080
1076 memset(dma, 0, sizeof(struct i596_dma)); 1081 memset(dma, 0, sizeof(struct i596_dma));
1077 lp->dma = dma; 1082 lp->dma = dma;
diff --git a/drivers/net/loopback.c b/drivers/net/loopback.c
index b7d438a367f3..6f71157bea8e 100644
--- a/drivers/net/loopback.c
+++ b/drivers/net/loopback.c
@@ -62,6 +62,7 @@
62struct pcpu_lstats { 62struct pcpu_lstats {
63 unsigned long packets; 63 unsigned long packets;
64 unsigned long bytes; 64 unsigned long bytes;
65 unsigned long drops;
65}; 66};
66 67
67/* 68/*
@@ -71,18 +72,22 @@ struct pcpu_lstats {
71static int loopback_xmit(struct sk_buff *skb, struct net_device *dev) 72static int loopback_xmit(struct sk_buff *skb, struct net_device *dev)
72{ 73{
73 struct pcpu_lstats *pcpu_lstats, *lb_stats; 74 struct pcpu_lstats *pcpu_lstats, *lb_stats;
75 int len;
74 76
75 skb_orphan(skb); 77 skb_orphan(skb);
76 78
77 skb->protocol = eth_type_trans(skb,dev); 79 skb->protocol = eth_type_trans(skb, dev);
78 80
79 /* it's OK to use per_cpu_ptr() because BHs are off */ 81 /* it's OK to use per_cpu_ptr() because BHs are off */
80 pcpu_lstats = dev->ml_priv; 82 pcpu_lstats = dev->ml_priv;
81 lb_stats = per_cpu_ptr(pcpu_lstats, smp_processor_id()); 83 lb_stats = per_cpu_ptr(pcpu_lstats, smp_processor_id());
82 lb_stats->bytes += skb->len;
83 lb_stats->packets++;
84 84
85 netif_rx(skb); 85 len = skb->len;
86 if (likely(netif_rx(skb) == NET_RX_SUCCESS)) {
87 lb_stats->bytes += len;
88 lb_stats->packets++;
89 } else
90 lb_stats->drops++;
86 91
87 return 0; 92 return 0;
88} 93}
@@ -93,6 +98,7 @@ static struct net_device_stats *loopback_get_stats(struct net_device *dev)
93 struct net_device_stats *stats = &dev->stats; 98 struct net_device_stats *stats = &dev->stats;
94 unsigned long bytes = 0; 99 unsigned long bytes = 0;
95 unsigned long packets = 0; 100 unsigned long packets = 0;
101 unsigned long drops = 0;
96 int i; 102 int i;
97 103
98 pcpu_lstats = dev->ml_priv; 104 pcpu_lstats = dev->ml_priv;
@@ -102,11 +108,14 @@ static struct net_device_stats *loopback_get_stats(struct net_device *dev)
102 lb_stats = per_cpu_ptr(pcpu_lstats, i); 108 lb_stats = per_cpu_ptr(pcpu_lstats, i);
103 bytes += lb_stats->bytes; 109 bytes += lb_stats->bytes;
104 packets += lb_stats->packets; 110 packets += lb_stats->packets;
111 drops += lb_stats->drops;
105 } 112 }
106 stats->rx_packets = packets; 113 stats->rx_packets = packets;
107 stats->tx_packets = packets; 114 stats->tx_packets = packets;
108 stats->rx_bytes = bytes; 115 stats->rx_dropped = drops;
109 stats->tx_bytes = bytes; 116 stats->rx_errors = drops;
117 stats->rx_bytes = bytes;
118 stats->tx_bytes = bytes;
110 return stats; 119 return stats;
111} 120}
112 121
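The loopback hunks above add a per-CPU drops counter, count a frame only when netif_rx() returns NET_RX_SUCCESS, and fold the drops into rx_dropped/rx_errors when the counters are summed. Note that skb->len is copied into a local before netif_rx() is called, since the skb may already be gone by the time the return value is checked. A small sketch of the same per-CPU counting scheme, with illustrative demo_* names and a layout assumed to match pcpu_lstats:

#include <linux/netdevice.h>
#include <linux/percpu.h>
#include <linux/cpumask.h>
#include <linux/smp.h>

struct demo_lstats {
        unsigned long packets;
        unsigned long bytes;
        unsigned long drops;
};

/* Counters allocated per CPU, e.g. with alloc_percpu(struct demo_lstats). */
static void demo_count(struct demo_lstats *pcpu_stats, unsigned int len,
                       bool delivered)
{
        /* safe without a lock: caller runs with BHs off on this CPU */
        struct demo_lstats *s = per_cpu_ptr(pcpu_stats, smp_processor_id());

        if (delivered) {
                s->bytes += len;
                s->packets++;
        } else {
                s->drops++;
        }
}

/* Fold the per-CPU counters the way loopback_get_stats() does above. */
static void demo_fold(struct demo_lstats *pcpu_stats,
                      struct net_device_stats *stats)
{
        unsigned long packets = 0, bytes = 0, drops = 0;
        int i;

        for_each_possible_cpu(i) {
                struct demo_lstats *s = per_cpu_ptr(pcpu_stats, i);

                packets += s->packets;
                bytes += s->bytes;
                drops += s->drops;
        }
        stats->rx_packets = stats->tx_packets = packets;
        stats->rx_bytes = stats->tx_bytes = bytes;
        stats->rx_dropped = stats->rx_errors = drops;
}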
diff --git a/drivers/net/mace.c b/drivers/net/mace.c
index feebbd92aff2..1ad740bc8878 100644
--- a/drivers/net/mace.c
+++ b/drivers/net/mace.c
@@ -94,6 +94,16 @@ static void __mace_set_address(struct net_device *dev, void *addr);
94 */ 94 */
95static unsigned char *dummy_buf; 95static unsigned char *dummy_buf;
96 96
97static const struct net_device_ops mace_netdev_ops = {
98 .ndo_open = mace_open,
99 .ndo_stop = mace_close,
100 .ndo_start_xmit = mace_xmit_start,
101 .ndo_set_multicast_list = mace_set_multicast,
102 .ndo_set_mac_address = mace_set_address,
103 .ndo_change_mtu = eth_change_mtu,
104 .ndo_validate_addr = eth_validate_addr,
105};
106
97static int __devinit mace_probe(struct macio_dev *mdev, const struct of_device_id *match) 107static int __devinit mace_probe(struct macio_dev *mdev, const struct of_device_id *match)
98{ 108{
99 struct device_node *mace = macio_get_of_node(mdev); 109 struct device_node *mace = macio_get_of_node(mdev);
@@ -207,11 +217,7 @@ static int __devinit mace_probe(struct macio_dev *mdev, const struct of_device_i
207 } 217 }
208 } 218 }
209 219
210 dev->open = mace_open; 220 dev->netdev_ops = &mace_netdev_ops;
211 dev->stop = mace_close;
212 dev->hard_start_xmit = mace_xmit_start;
213 dev->set_multicast_list = mace_set_multicast;
214 dev->set_mac_address = mace_set_address;
215 221
216 /* 222 /*
217 * Most of what is below could be moved to mace_open() 223 * Most of what is below could be moved to mace_open()
diff --git a/drivers/net/macmace.c b/drivers/net/macmace.c
index 274e99bb63ac..44f3c2896f20 100644
--- a/drivers/net/macmace.c
+++ b/drivers/net/macmace.c
@@ -180,6 +180,17 @@ static void mace_dma_off(struct net_device *dev)
180 psc_write_word(PSC_ENETWR_CMD + PSC_SET1, 0x1100); 180 psc_write_word(PSC_ENETWR_CMD + PSC_SET1, 0x1100);
181} 181}
182 182
183static const struct net_device_ops mace_netdev_ops = {
184 .ndo_open = mace_open,
185 .ndo_stop = mace_close,
186 .ndo_start_xmit = mace_xmit_start,
187 .ndo_tx_timeout = mace_tx_timeout,
188 .ndo_set_multicast_list = mace_set_multicast,
189 .ndo_set_mac_address = mace_set_address,
190 .ndo_change_mtu = eth_change_mtu,
191 .ndo_validate_addr = eth_validate_addr,
192};
193
183/* 194/*
184 * Not really much of a probe. The hardware table tells us if this 195 * Not really much of a probe. The hardware table tells us if this
185 * model of Macintrash has a MACE (AV macintoshes) 196 * model of Macintrash has a MACE (AV macintoshes)
@@ -240,13 +251,8 @@ static int __devinit mace_probe(struct platform_device *pdev)
240 return -ENODEV; 251 return -ENODEV;
241 } 252 }
242 253
243 dev->open = mace_open; 254 dev->netdev_ops = &mace_netdev_ops;
244 dev->stop = mace_close;
245 dev->hard_start_xmit = mace_xmit_start;
246 dev->tx_timeout = mace_tx_timeout;
247 dev->watchdog_timeo = TX_TIMEOUT; 255 dev->watchdog_timeo = TX_TIMEOUT;
248 dev->set_multicast_list = mace_set_multicast;
249 dev->set_mac_address = mace_set_address;
250 256
251 printk(KERN_INFO "%s: 68K MACE, hardware address %pM\n", 257 printk(KERN_INFO "%s: 68K MACE, hardware address %pM\n",
252 dev->name, dev->dev_addr); 258 dev->name, dev->dev_addr);
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index 214a8cf2b708..329cd50d0e29 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -374,36 +374,20 @@ static void macvlan_ethtool_get_drvinfo(struct net_device *dev,
374static u32 macvlan_ethtool_get_rx_csum(struct net_device *dev) 374static u32 macvlan_ethtool_get_rx_csum(struct net_device *dev)
375{ 375{
376 const struct macvlan_dev *vlan = netdev_priv(dev); 376 const struct macvlan_dev *vlan = netdev_priv(dev);
377 struct net_device *lowerdev = vlan->lowerdev; 377 return dev_ethtool_get_rx_csum(vlan->lowerdev);
378
379 if (lowerdev->ethtool_ops == NULL ||
380 lowerdev->ethtool_ops->get_rx_csum == NULL)
381 return 0;
382 return lowerdev->ethtool_ops->get_rx_csum(lowerdev);
383} 378}
384 379
385static int macvlan_ethtool_get_settings(struct net_device *dev, 380static int macvlan_ethtool_get_settings(struct net_device *dev,
386 struct ethtool_cmd *cmd) 381 struct ethtool_cmd *cmd)
387{ 382{
388 const struct macvlan_dev *vlan = netdev_priv(dev); 383 const struct macvlan_dev *vlan = netdev_priv(dev);
389 struct net_device *lowerdev = vlan->lowerdev; 384 return dev_ethtool_get_settings(vlan->lowerdev, cmd);
390
391 if (!lowerdev->ethtool_ops ||
392 !lowerdev->ethtool_ops->get_settings)
393 return -EOPNOTSUPP;
394
395 return lowerdev->ethtool_ops->get_settings(lowerdev, cmd);
396} 385}
397 386
398static u32 macvlan_ethtool_get_flags(struct net_device *dev) 387static u32 macvlan_ethtool_get_flags(struct net_device *dev)
399{ 388{
400 const struct macvlan_dev *vlan = netdev_priv(dev); 389 const struct macvlan_dev *vlan = netdev_priv(dev);
401 struct net_device *lowerdev = vlan->lowerdev; 390 return dev_ethtool_get_flags(vlan->lowerdev);
402
403 if (!lowerdev->ethtool_ops ||
404 !lowerdev->ethtool_ops->get_flags)
405 return 0;
406 return lowerdev->ethtool_ops->get_flags(lowerdev);
407} 391}
408 392
409static const struct ethtool_ops macvlan_ethtool_ops = { 393static const struct ethtool_ops macvlan_ethtool_ops = {
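The macvlan hunks above replace the open-coded checks on lowerdev->ethtool_ops with dev_ethtool_get_rx_csum(), dev_ethtool_get_settings() and dev_ethtool_get_flags() helpers. Their bodies are not part of this diff, so the sketch below only illustrates what such a delegation helper has to do, written as local demo_* functions rather than the core API:

#include <linux/netdevice.h>
#include <linux/ethtool.h>
#include <linux/errno.h>

/* Report "no rx checksum offload" when the lower device stays silent. */
static u32 demo_get_rx_csum(struct net_device *lowerdev)
{
        if (!lowerdev->ethtool_ops || !lowerdev->ethtool_ops->get_rx_csum)
                return 0;
        return lowerdev->ethtool_ops->get_rx_csum(lowerdev);
}

/* Settings queries propagate -EOPNOTSUPP instead of a default value. */
static int demo_get_settings(struct net_device *lowerdev,
                             struct ethtool_cmd *cmd)
{
        if (!lowerdev->ethtool_ops || !lowerdev->ethtool_ops->get_settings)
                return -EOPNOTSUPP;
        return lowerdev->ethtool_ops->get_settings(lowerdev, cmd);
}

With the checks in shared helpers, any device stacked on a lower netdev can reuse one copy instead of repeating the NULL tests per driver, which is exactly what the three macvlan ethtool callbacks collapse to above.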
diff --git a/drivers/net/meth.c b/drivers/net/meth.c
index aa08987f6e81..46ffdb464ac4 100644
--- a/drivers/net/meth.c
+++ b/drivers/net/meth.c
@@ -769,9 +769,17 @@ static int meth_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
769 } 769 }
770} 770}
771 771
772/* 772static const struct net_device_ops meth_netdev_ops = {
773 * Return statistics to the caller 773 .ndo_open = meth_open,
774 */ 774 .ndo_stop = meth_release,
775 .ndo_start_xmit = meth_tx,
776 .ndo_do_ioctl = meth_ioctl,
777 .ndo_tx_timeout = meth_tx_timeout,
778 .ndo_change_mtu = eth_change_mtu,
779 .ndo_validate_addr = eth_validate_addr,
780 .ndo_set_mac_address = eth_mac_addr,
781};
782
775/* 783/*
776 * The init function. 784 * The init function.
777 */ 785 */
@@ -785,16 +793,10 @@ static int __init meth_probe(struct platform_device *pdev)
785 if (!dev) 793 if (!dev)
786 return -ENOMEM; 794 return -ENOMEM;
787 795
788 dev->open = meth_open; 796 dev->netdev_ops = &meth_netdev_ops;
789 dev->stop = meth_release; 797 dev->watchdog_timeo = timeout;
790 dev->hard_start_xmit = meth_tx; 798 dev->irq = MACE_ETHERNET_IRQ;
791 dev->do_ioctl = meth_ioctl; 799 dev->base_addr = (unsigned long)&mace->eth;
792#ifdef HAVE_TX_TIMEOUT
793 dev->tx_timeout = meth_tx_timeout;
794 dev->watchdog_timeo = timeout;
795#endif
796 dev->irq = MACE_ETHERNET_IRQ;
797 dev->base_addr = (unsigned long)&mace->eth;
798 memcpy(dev->dev_addr, o2meth_eaddr, 6); 800 memcpy(dev->dev_addr, o2meth_eaddr, 6);
799 801
800 priv = netdev_priv(dev); 802 priv = netdev_priv(dev);
diff --git a/drivers/net/mipsnet.c b/drivers/net/mipsnet.c
index 664835b822fb..b3b9a147d09a 100644
--- a/drivers/net/mipsnet.c
+++ b/drivers/net/mipsnet.c
@@ -237,6 +237,16 @@ static void mipsnet_set_mclist(struct net_device *dev)
237{ 237{
238} 238}
239 239
240static const struct net_device_ops mipsnet_netdev_ops = {
241 .ndo_open = mipsnet_open,
242 .ndo_stop = mipsnet_close,
243 .ndo_start_xmit = mipsnet_xmit,
244 .ndo_set_multicast_list = mipsnet_set_mclist,
245 .ndo_change_mtu = eth_change_mtu,
246 .ndo_validate_addr = eth_validate_addr,
247 .ndo_set_mac_address = eth_mac_addr,
248};
249
240static int __init mipsnet_probe(struct platform_device *dev) 250static int __init mipsnet_probe(struct platform_device *dev)
241{ 251{
242 struct net_device *netdev; 252 struct net_device *netdev;
@@ -250,10 +260,7 @@ static int __init mipsnet_probe(struct platform_device *dev)
250 260
251 platform_set_drvdata(dev, netdev); 261 platform_set_drvdata(dev, netdev);
252 262
253 netdev->open = mipsnet_open; 263 netdev->netdev_ops = &mipsnet_netdev_ops;
254 netdev->stop = mipsnet_close;
255 netdev->hard_start_xmit = mipsnet_xmit;
256 netdev->set_multicast_list = mipsnet_set_mclist;
257 264
258 /* 265 /*
259 * TODO: probe for these or load them from PARAM 266 * TODO: probe for these or load them from PARAM
diff --git a/drivers/net/mvme147.c b/drivers/net/mvme147.c
index 435e5a847c43..93c709d63e2f 100644
--- a/drivers/net/mvme147.c
+++ b/drivers/net/mvme147.c
@@ -57,6 +57,17 @@ typedef void (*writerap_t)(void *, unsigned short);
57typedef void (*writerdp_t)(void *, unsigned short); 57typedef void (*writerdp_t)(void *, unsigned short);
58typedef unsigned short (*readrdp_t)(void *); 58typedef unsigned short (*readrdp_t)(void *);
59 59
60static const struct net_device_ops lance_netdev_ops = {
61 .ndo_open = m147lance_open,
62 .ndo_stop = m147lance_close,
63 .ndo_start_xmit = lance_start_xmit,
64 .ndo_set_multicast_list = lance_set_multicast,
65 .ndo_tx_timeout = lance_tx_timeout,
66 .ndo_change_mtu = eth_change_mtu,
67 .ndo_validate_addr = eth_validate_addr,
68 .ndo_set_mac_address = eth_mac_addr,
69};
70
60/* Initialise the one and only on-board 7990 */ 71/* Initialise the one and only on-board 7990 */
61struct net_device * __init mvme147lance_probe(int unit) 72struct net_device * __init mvme147lance_probe(int unit)
62{ 73{
@@ -81,11 +92,7 @@ struct net_device * __init mvme147lance_probe(int unit)
81 92
82 /* Fill the dev fields */ 93 /* Fill the dev fields */
83 dev->base_addr = (unsigned long)MVME147_LANCE_BASE; 94 dev->base_addr = (unsigned long)MVME147_LANCE_BASE;
84 dev->open = &m147lance_open; 95 dev->netdev_ops = &lance_netdev_ops;
85 dev->stop = &m147lance_close;
86 dev->hard_start_xmit = &lance_start_xmit;
87 dev->set_multicast_list = &lance_set_multicast;
88 dev->tx_timeout = &lance_tx_timeout;
89 dev->dma = 0; 96 dev->dma = 0;
90 97
91 addr=(u_long *)ETHERNET_ADDRESS; 98 addr=(u_long *)ETHERNET_ADDRESS;
diff --git a/drivers/net/myri10ge/myri10ge.c b/drivers/net/myri10ge/myri10ge.c
index f2c4a665e93f..140794a8d56a 100644
--- a/drivers/net/myri10ge/myri10ge.c
+++ b/drivers/net/myri10ge/myri10ge.c
@@ -75,7 +75,7 @@
75#include "myri10ge_mcp.h" 75#include "myri10ge_mcp.h"
76#include "myri10ge_mcp_gen_header.h" 76#include "myri10ge_mcp_gen_header.h"
77 77
78#define MYRI10GE_VERSION_STR "1.4.4-1.401" 78#define MYRI10GE_VERSION_STR "1.4.4-1.412"
79 79
80MODULE_DESCRIPTION("Myricom 10G driver (10GbE)"); 80MODULE_DESCRIPTION("Myricom 10G driver (10GbE)");
81MODULE_AUTHOR("Maintainer: help@myri.com"); 81MODULE_AUTHOR("Maintainer: help@myri.com");
@@ -255,6 +255,7 @@ struct myri10ge_priv {
255 u32 read_write_dma; 255 u32 read_write_dma;
256 u32 link_changes; 256 u32 link_changes;
257 u32 msg_enable; 257 u32 msg_enable;
258 unsigned int board_number;
258}; 259};
259 260
260static char *myri10ge_fw_unaligned = "myri10ge_ethp_z8e.dat"; 261static char *myri10ge_fw_unaligned = "myri10ge_ethp_z8e.dat";
@@ -266,6 +267,13 @@ static char *myri10ge_fw_name = NULL;
266module_param(myri10ge_fw_name, charp, S_IRUGO | S_IWUSR); 267module_param(myri10ge_fw_name, charp, S_IRUGO | S_IWUSR);
267MODULE_PARM_DESC(myri10ge_fw_name, "Firmware image name"); 268MODULE_PARM_DESC(myri10ge_fw_name, "Firmware image name");
268 269
270#define MYRI10GE_MAX_BOARDS 8
271static char *myri10ge_fw_names[MYRI10GE_MAX_BOARDS] =
272 {[0 ... (MYRI10GE_MAX_BOARDS - 1)] = NULL };
273module_param_array_named(myri10ge_fw_names, myri10ge_fw_names, charp, NULL,
274 0444);
275MODULE_PARM_DESC(myri10ge_fw_names, "Firmware image names per board");
276
269static int myri10ge_ecrc_enable = 1; 277static int myri10ge_ecrc_enable = 1;
270module_param(myri10ge_ecrc_enable, int, S_IRUGO); 278module_param(myri10ge_ecrc_enable, int, S_IRUGO);
271MODULE_PARM_DESC(myri10ge_ecrc_enable, "Enable Extended CRC on PCI-E"); 279MODULE_PARM_DESC(myri10ge_ecrc_enable, "Enable Extended CRC on PCI-E");
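The hunk above adds a myri10ge_fw_names[] array parameter so each board can be given its own firmware image on the module command line. A minimal sketch of how such a charp array parameter is declared and consumed, with demo_* names and file names standing in for the real ones:

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/string.h>

#define DEMO_MAX_BOARDS 8

/* Loaded as, e.g.: modprobe demo fw_names=fw_a.dat,fw_b.dat */
static char *demo_fw_names[DEMO_MAX_BOARDS];
module_param_array_named(fw_names, demo_fw_names, charp, NULL, 0444);
MODULE_PARM_DESC(fw_names, "Firmware image names per board");

/* Board index comes from probe order, like mgp->board_number below. */
static const char *demo_pick_fw(unsigned int board, const char *fallback)
{
        if (board < DEMO_MAX_BOARDS &&
            demo_fw_names[board] && strlen(demo_fw_names[board]))
                return demo_fw_names[board];
        return fallback;
}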
@@ -361,6 +369,8 @@ static inline void put_be32(__be32 val, __be32 __iomem * p)
361 __raw_writel((__force __u32) val, (__force void __iomem *)p); 369 __raw_writel((__force __u32) val, (__force void __iomem *)p);
362} 370}
363 371
372static struct net_device_stats *myri10ge_get_stats(struct net_device *dev);
373
364static int 374static int
365myri10ge_send_cmd(struct myri10ge_priv *mgp, u32 cmd, 375myri10ge_send_cmd(struct myri10ge_priv *mgp, u32 cmd,
366 struct myri10ge_cmd *data, int atomic) 376 struct myri10ge_cmd *data, int atomic)
@@ -1686,7 +1696,7 @@ myri10ge_get_ringparam(struct net_device *netdev,
1686 ring->rx_mini_max_pending = mgp->ss[0].rx_small.mask + 1; 1696 ring->rx_mini_max_pending = mgp->ss[0].rx_small.mask + 1;
1687 ring->rx_max_pending = mgp->ss[0].rx_big.mask + 1; 1697 ring->rx_max_pending = mgp->ss[0].rx_big.mask + 1;
1688 ring->rx_jumbo_max_pending = 0; 1698 ring->rx_jumbo_max_pending = 0;
1689 ring->tx_max_pending = mgp->ss[0].rx_small.mask + 1; 1699 ring->tx_max_pending = mgp->ss[0].tx.mask + 1;
1690 ring->rx_mini_pending = ring->rx_mini_max_pending; 1700 ring->rx_mini_pending = ring->rx_mini_max_pending;
1691 ring->rx_pending = ring->rx_max_pending; 1701 ring->rx_pending = ring->rx_max_pending;
1692 ring->rx_jumbo_pending = ring->rx_jumbo_max_pending; 1702 ring->rx_jumbo_pending = ring->rx_jumbo_max_pending;
@@ -1803,6 +1813,8 @@ myri10ge_get_ethtool_stats(struct net_device *netdev,
1803 int slice; 1813 int slice;
1804 int i; 1814 int i;
1805 1815
1816 /* force stats update */
1817 (void)myri10ge_get_stats(netdev);
1806 for (i = 0; i < MYRI10GE_NET_STATS_LEN; i++) 1818 for (i = 0; i < MYRI10GE_NET_STATS_LEN; i++)
1807 data[i] = ((unsigned long *)&mgp->stats)[i]; 1819 data[i] = ((unsigned long *)&mgp->stats)[i];
1808 1820
@@ -2969,6 +2981,7 @@ static struct net_device_stats *myri10ge_get_stats(struct net_device *dev)
2969 struct net_device_stats *stats = &mgp->stats; 2981 struct net_device_stats *stats = &mgp->stats;
2970 int i; 2982 int i;
2971 2983
2984 spin_lock(&mgp->stats_lock);
2972 memset(stats, 0, sizeof(*stats)); 2985 memset(stats, 0, sizeof(*stats));
2973 for (i = 0; i < mgp->num_slices; i++) { 2986 for (i = 0; i < mgp->num_slices; i++) {
2974 slice_stats = &mgp->ss[i].stats; 2987 slice_stats = &mgp->ss[i].stats;
@@ -2979,6 +2992,7 @@ static struct net_device_stats *myri10ge_get_stats(struct net_device *dev)
2979 stats->rx_dropped += slice_stats->rx_dropped; 2992 stats->rx_dropped += slice_stats->rx_dropped;
2980 stats->tx_dropped += slice_stats->tx_dropped; 2993 stats->tx_dropped += slice_stats->tx_dropped;
2981 } 2994 }
2995 spin_unlock(&mgp->stats_lock);
2982 return stats; 2996 return stats;
2983} 2997}
2984 2998
@@ -3253,6 +3267,8 @@ abort:
3253 3267
3254static void myri10ge_select_firmware(struct myri10ge_priv *mgp) 3268static void myri10ge_select_firmware(struct myri10ge_priv *mgp)
3255{ 3269{
3270 int overridden = 0;
3271
3256 if (myri10ge_force_firmware == 0) { 3272 if (myri10ge_force_firmware == 0) {
3257 int link_width, exp_cap; 3273 int link_width, exp_cap;
3258 u16 lnk; 3274 u16 lnk;
@@ -3286,10 +3302,18 @@ static void myri10ge_select_firmware(struct myri10ge_priv *mgp)
3286 } 3302 }
3287 } 3303 }
3288 if (myri10ge_fw_name != NULL) { 3304 if (myri10ge_fw_name != NULL) {
3289 dev_info(&mgp->pdev->dev, "overriding firmware to %s\n", 3305 overridden = 1;
3290 myri10ge_fw_name);
3291 mgp->fw_name = myri10ge_fw_name; 3306 mgp->fw_name = myri10ge_fw_name;
3292 } 3307 }
3308 if (mgp->board_number < MYRI10GE_MAX_BOARDS &&
3309 myri10ge_fw_names[mgp->board_number] != NULL &&
3310 strlen(myri10ge_fw_names[mgp->board_number])) {
3311 mgp->fw_name = myri10ge_fw_names[mgp->board_number];
3312 overridden = 1;
3313 }
3314 if (overridden)
3315 dev_info(&mgp->pdev->dev, "overriding firmware to %s\n",
3316 mgp->fw_name);
3293} 3317}
3294 3318
3295#ifdef CONFIG_PM 3319#ifdef CONFIG_PM
@@ -3754,6 +3778,7 @@ static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
3754 int status = -ENXIO; 3778 int status = -ENXIO;
3755 int dac_enabled; 3779 int dac_enabled;
3756 unsigned hdr_offset, ss_offset; 3780 unsigned hdr_offset, ss_offset;
3781 static int board_number;
3757 3782
3758 netdev = alloc_etherdev_mq(sizeof(*mgp), MYRI10GE_MAX_SLICES); 3783 netdev = alloc_etherdev_mq(sizeof(*mgp), MYRI10GE_MAX_SLICES);
3759 if (netdev == NULL) { 3784 if (netdev == NULL) {
@@ -3770,6 +3795,7 @@ static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
3770 mgp->pause = myri10ge_flow_control; 3795 mgp->pause = myri10ge_flow_control;
3771 mgp->intr_coal_delay = myri10ge_intr_coal_delay; 3796 mgp->intr_coal_delay = myri10ge_intr_coal_delay;
3772 mgp->msg_enable = netif_msg_init(myri10ge_debug, MYRI10GE_MSG_DEFAULT); 3797 mgp->msg_enable = netif_msg_init(myri10ge_debug, MYRI10GE_MSG_DEFAULT);
3798 mgp->board_number = board_number;
3773 init_waitqueue_head(&mgp->down_wq); 3799 init_waitqueue_head(&mgp->down_wq);
3774 3800
3775 if (pci_enable_device(pdev)) { 3801 if (pci_enable_device(pdev)) {
@@ -3902,6 +3928,7 @@ static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
3902 setup_timer(&mgp->watchdog_timer, myri10ge_watchdog_timer, 3928 setup_timer(&mgp->watchdog_timer, myri10ge_watchdog_timer,
3903 (unsigned long)mgp); 3929 (unsigned long)mgp);
3904 3930
3931 spin_lock_init(&mgp->stats_lock);
3905 SET_ETHTOOL_OPS(netdev, &myri10ge_ethtool_ops); 3932 SET_ETHTOOL_OPS(netdev, &myri10ge_ethtool_ops);
3906 INIT_WORK(&mgp->watchdog_work, myri10ge_watchdog); 3933 INIT_WORK(&mgp->watchdog_work, myri10ge_watchdog);
3907 status = register_netdev(netdev); 3934 status = register_netdev(netdev);
@@ -3919,6 +3946,7 @@ static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
3919 netdev->irq, mgp->tx_boundary, mgp->fw_name, 3946 netdev->irq, mgp->tx_boundary, mgp->fw_name,
3920 (mgp->wc_enabled ? "Enabled" : "Disabled")); 3947 (mgp->wc_enabled ? "Enabled" : "Disabled"));
3921 3948
3949 board_number++;
3922 return 0; 3950 return 0;
3923 3951
3924abort_with_state: 3952abort_with_state:
@@ -4008,6 +4036,8 @@ static struct pci_device_id myri10ge_pci_tbl[] = {
4008 {0}, 4036 {0},
4009}; 4037};
4010 4038
4039MODULE_DEVICE_TABLE(pci, myri10ge_pci_tbl);
4040
4011static struct pci_driver myri10ge_driver = { 4041static struct pci_driver myri10ge_driver = {
4012 .name = "myri10ge", 4042 .name = "myri10ge",
4013 .probe = myri10ge_probe, 4043 .probe = myri10ge_probe,
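The final myri10ge hunk exports the PCI ID table with MODULE_DEVICE_TABLE, which emits the module alias information udev/modprobe use to load the driver automatically when a matching device appears. The declaration pattern, with made-up IDs:

#include <linux/module.h>
#include <linux/pci.h>

static struct pci_device_id demo_pci_tbl[] = {
        { PCI_DEVICE(0x1234, 0x5678) }, /* hypothetical vendor/device ID */
        { 0, }
};
MODULE_DEVICE_TABLE(pci, demo_pci_tbl);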
diff --git a/drivers/net/netx-eth.c b/drivers/net/netx-eth.c
index 1861d5bbd96b..946366dcc992 100644
--- a/drivers/net/netx-eth.c
+++ b/drivers/net/netx-eth.c
@@ -301,6 +301,17 @@ netx_eth_phy_write(struct net_device *ndev, int phy_id, int reg, int value)
301 while (readl(NETX_MIIMU) & MIIMU_SNRDY); 301 while (readl(NETX_MIIMU) & MIIMU_SNRDY);
302} 302}
303 303
304static const struct net_device_ops netx_eth_netdev_ops = {
305 .ndo_open = netx_eth_open,
306 .ndo_stop = netx_eth_close,
307 .ndo_start_xmit = netx_eth_hard_start_xmit,
308 .ndo_tx_timeout = netx_eth_timeout,
309 .ndo_set_multicast_list = netx_eth_set_multicast_list,
310 .ndo_change_mtu = eth_change_mtu,
311 .ndo_validate_addr = eth_validate_addr,
312 .ndo_set_mac_address = eth_mac_addr,
313};
314
304static int netx_eth_enable(struct net_device *ndev) 315static int netx_eth_enable(struct net_device *ndev)
305{ 316{
306 struct netx_eth_priv *priv = netdev_priv(ndev); 317 struct netx_eth_priv *priv = netdev_priv(ndev);
@@ -309,12 +320,8 @@ static int netx_eth_enable(struct net_device *ndev)
309 320
310 ether_setup(ndev); 321 ether_setup(ndev);
311 322
312 ndev->open = netx_eth_open; 323 ndev->netdev_ops = &netx_eth_netdev_ops;
313 ndev->stop = netx_eth_close;
314 ndev->hard_start_xmit = netx_eth_hard_start_xmit;
315 ndev->tx_timeout = netx_eth_timeout;
316 ndev->watchdog_timeo = msecs_to_jiffies(5000); 324 ndev->watchdog_timeo = msecs_to_jiffies(5000);
317 ndev->set_multicast_list = netx_eth_set_multicast_list;
318 325
319 priv->msg_enable = NETIF_MSG_LINK; 326 priv->msg_enable = NETIF_MSG_LINK;
320 priv->mii.phy_id_mask = 0x1f; 327 priv->mii.phy_id_mask = 0x1f;
diff --git a/drivers/net/netxen/netxen_nic.h b/drivers/net/netxen/netxen_nic.h
index c40815169f35..ebd6c2edc343 100644
--- a/drivers/net/netxen/netxen_nic.h
+++ b/drivers/net/netxen/netxen_nic.h
@@ -34,10 +34,6 @@
34#include <linux/module.h> 34#include <linux/module.h>
35#include <linux/kernel.h> 35#include <linux/kernel.h>
36#include <linux/types.h> 36#include <linux/types.h>
37#include <linux/compiler.h>
38#include <linux/slab.h>
39#include <linux/delay.h>
40#include <linux/init.h>
41#include <linux/ioport.h> 37#include <linux/ioport.h>
42#include <linux/pci.h> 38#include <linux/pci.h>
43#include <linux/netdevice.h> 39#include <linux/netdevice.h>
@@ -49,18 +45,12 @@
49 45
50#include <linux/ethtool.h> 46#include <linux/ethtool.h>
51#include <linux/mii.h> 47#include <linux/mii.h>
52#include <linux/interrupt.h>
53#include <linux/timer.h> 48#include <linux/timer.h>
54 49
55#include <linux/mm.h>
56#include <linux/mman.h>
57#include <linux/vmalloc.h> 50#include <linux/vmalloc.h>
58 51
59#include <asm/system.h>
60#include <asm/io.h> 52#include <asm/io.h>
61#include <asm/byteorder.h> 53#include <asm/byteorder.h>
62#include <asm/uaccess.h>
63#include <asm/pgtable.h>
64 54
65#include "netxen_nic_hw.h" 55#include "netxen_nic_hw.h"
66 56
@@ -84,10 +74,10 @@
84 (sizeof(struct netxen_rx_buffer) * rds_ring->num_desc) 74 (sizeof(struct netxen_rx_buffer) * rds_ring->num_desc)
85#define STATUS_DESC_RINGSIZE(sds_ring) \ 75#define STATUS_DESC_RINGSIZE(sds_ring) \
86 (sizeof(struct status_desc) * (sds_ring)->num_desc) 76 (sizeof(struct status_desc) * (sds_ring)->num_desc)
87#define TX_BUFF_RINGSIZE(adapter) \ 77#define TX_BUFF_RINGSIZE(tx_ring) \
88 (sizeof(struct netxen_cmd_buffer) * adapter->num_txd) 78 (sizeof(struct netxen_cmd_buffer) * tx_ring->num_desc)
89#define TX_DESC_RINGSIZE(adapter) \ 79#define TX_DESC_RINGSIZE(tx_ring) \
90 (sizeof(struct cmd_desc_type0) * adapter->num_txd) 80 (sizeof(struct cmd_desc_type0) * tx_ring->num_desc)
91 81
92#define find_diff_among(a,b,range) ((a)<(b)?((b)-(a)):((b)+(range)-(a))) 82#define find_diff_among(a,b,range) ((a)<(b)?((b)-(a)):((b)+(range)-(a)))
93 83
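In the hunk above, TX_BUFF_RINGSIZE() and TX_DESC_RINGSIZE() switch from reading adapter->num_txd to sizing off the tx_ring itself, matching the per-ring nx_host_tx_ring introduced later in this header. A sketch of how macros of that shape are typically used when allocating a transmit ring, with demo_* types that only mirror the fields shown in this diff:

#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/skbuff.h>
#include <linux/errno.h>

struct demo_cmd_buffer { struct sk_buff *skb; u64 dma; };
struct demo_cmd_desc { __le64 word[8]; };

struct demo_tx_ring {
        u32 num_desc;
        struct demo_cmd_buffer *cmd_buf_arr;
        struct demo_cmd_desc *desc_head;
        dma_addr_t phys_addr;
};

/* Size from the ring object itself, as the reworked macros do. */
#define DEMO_TX_BUFF_RINGSIZE(ring) \
        (sizeof(struct demo_cmd_buffer) * (ring)->num_desc)
#define DEMO_TX_DESC_RINGSIZE(ring) \
        (sizeof(struct demo_cmd_desc) * (ring)->num_desc)

static int demo_alloc_tx_ring(struct pci_dev *pdev, struct demo_tx_ring *ring)
{
        ring->cmd_buf_arr = kzalloc(DEMO_TX_BUFF_RINGSIZE(ring), GFP_KERNEL);
        if (!ring->cmd_buf_arr)
                return -ENOMEM;

        ring->desc_head = pci_alloc_consistent(pdev, DEMO_TX_DESC_RINGSIZE(ring),
                                               &ring->phys_addr);
        if (!ring->desc_head) {
                kfree(ring->cmd_buf_arr);
                return -ENOMEM;
        }
        return 0;
}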
@@ -118,6 +108,7 @@
118#define NX_P3_A2 0x30 108#define NX_P3_A2 0x30
119#define NX_P3_B0 0x40 109#define NX_P3_B0 0x40
120#define NX_P3_B1 0x41 110#define NX_P3_B1 0x41
111#define NX_P3_B2 0x42
121 112
122#define NX_IS_REVISION_P2(REVISION) (REVISION <= NX_P2_C1) 113#define NX_IS_REVISION_P2(REVISION) (REVISION <= NX_P2_C1)
123#define NX_IS_REVISION_P3(REVISION) (REVISION >= NX_P3_A0) 114#define NX_IS_REVISION_P3(REVISION) (REVISION >= NX_P3_A0)
@@ -203,18 +194,10 @@
203#define MAX_RCV_DESCRIPTORS_10G 4096 194#define MAX_RCV_DESCRIPTORS_10G 4096
204#define MAX_JUMBO_RCV_DESCRIPTORS 1024 195#define MAX_JUMBO_RCV_DESCRIPTORS 1024
205#define MAX_LRO_RCV_DESCRIPTORS 8 196#define MAX_LRO_RCV_DESCRIPTORS 8
206#define MAX_RCVSTATUS_DESCRIPTORS MAX_RCV_DESCRIPTORS
207#define MAX_JUMBO_RCV_DESC MAX_JUMBO_RCV_DESCRIPTORS
208#define MAX_RCV_DESC MAX_RCV_DESCRIPTORS
209#define MAX_RCVSTATUS_DESC MAX_RCV_DESCRIPTORS
210#define MAX_EPG_DESCRIPTORS (MAX_CMD_DESCRIPTORS * 8)
211#define NUM_RCV_DESC (MAX_RCV_DESC + MAX_JUMBO_RCV_DESCRIPTORS + \
212 MAX_LRO_RCV_DESCRIPTORS)
213#define MIN_TX_COUNT 4096
214#define MIN_RX_COUNT 4096
215#define NETXEN_CTX_SIGNATURE 0xdee0 197#define NETXEN_CTX_SIGNATURE 0xdee0
198#define NETXEN_CTX_SIGNATURE_V2 0x0002dee0
199#define NETXEN_CTX_RESET 0xbad0
216#define NETXEN_RCV_PRODUCER(ringid) (ringid) 200#define NETXEN_RCV_PRODUCER(ringid) (ringid)
217#define MAX_FRAME_SIZE 0x10000 /* 64K MAX size for LSO */
218 201
219#define PHAN_PEG_RCV_INITIALIZED 0xff01 202#define PHAN_PEG_RCV_INITIALIZED 0xff01
220#define PHAN_PEG_RCV_START_INITIALIZE 0xff00 203#define PHAN_PEG_RCV_START_INITIALIZE 0xff00
@@ -253,12 +236,19 @@ typedef u32 netxen_ctx_msg;
253#define netxen_set_msg_opcode(config_word, val) \ 236#define netxen_set_msg_opcode(config_word, val) \
254 ((config_word) &= ~(0xf<<28), (config_word) |= (val & 0xf) << 28) 237 ((config_word) &= ~(0xf<<28), (config_word) |= (val & 0xf) << 28)
255 238
256struct netxen_rcv_context { 239struct netxen_rcv_ring {
257 __le64 rcv_ring_addr; 240 __le64 addr;
258 __le32 rcv_ring_size; 241 __le32 size;
259 __le32 rsrvd; 242 __le32 rsrvd;
260}; 243};
261 244
245struct netxen_sts_ring {
246 __le64 addr;
247 __le32 size;
248 __le16 msi_index;
249 __le16 rsvd;
250};
251
262struct netxen_ring_ctx { 252struct netxen_ring_ctx {
263 253
264 /* one command ring */ 254 /* one command ring */
@@ -268,13 +258,18 @@ struct netxen_ring_ctx {
268 __le32 rsrvd; 258 __le32 rsrvd;
269 259
270 /* three receive rings */ 260 /* three receive rings */
271 struct netxen_rcv_context rcv_ctx[3]; 261 struct netxen_rcv_ring rcv_rings[NUM_RCV_DESC_RINGS];
272 262
273 /* one status ring */
274 __le64 sts_ring_addr; 263 __le64 sts_ring_addr;
275 __le32 sts_ring_size; 264 __le32 sts_ring_size;
276 265
277 __le32 ctx_id; 266 __le32 ctx_id;
267
268 __le64 rsrvd_2[3];
269 __le32 sts_ring_count;
270 __le32 rsrvd_3;
271 struct netxen_sts_ring sts_rings[NUM_STS_DESC_RINGS];
272
278} __attribute__ ((aligned(64))); 273} __attribute__ ((aligned(64)));
279 274
280/* 275/*
@@ -373,6 +368,7 @@ struct rcv_desc {
373/* opcode field in status_desc */ 368/* opcode field in status_desc */
374#define NETXEN_NIC_RXPKT_DESC 0x04 369#define NETXEN_NIC_RXPKT_DESC 0x04
375#define NETXEN_OLD_RXPKT_DESC 0x3f 370#define NETXEN_OLD_RXPKT_DESC 0x3f
371#define NETXEN_NIC_RESPONSE_DESC 0x05
376 372
377/* for status field in status_desc */ 373/* for status field in status_desc */
378#define STATUS_NEED_CKSUM (1) 374#define STATUS_NEED_CKSUM (1)
@@ -382,13 +378,11 @@ struct rcv_desc {
382#define STATUS_OWNER_HOST (0x1ULL << 56) 378#define STATUS_OWNER_HOST (0x1ULL << 56)
383#define STATUS_OWNER_PHANTOM (0x2ULL << 56) 379#define STATUS_OWNER_PHANTOM (0x2ULL << 56)
384 380
385/* Note: sizeof(status_desc) should always be a mutliple of 2 */ 381/* Status descriptor:
386 382 0-3 port, 4-7 status, 8-11 type, 12-27 total_length
387#define netxen_get_sts_desc_lro_cnt(status_desc) \ 383 28-43 reference_handle, 44-47 protocol, 48-52 pkt_offset
388 ((status_desc)->lro & 0x7F) 384 53-55 desc_cnt, 56-57 owner, 58-63 opcode
389#define netxen_get_sts_desc_lro_last_frag(status_desc) \ 385 */
390 (((status_desc)->lro & 0x80) >> 7)
391
392#define netxen_get_sts_port(sts_data) \ 386#define netxen_get_sts_port(sts_data) \
393 ((sts_data) & 0x0F) 387 ((sts_data) & 0x0F)
394#define netxen_get_sts_status(sts_data) \ 388#define netxen_get_sts_status(sts_data) \
@@ -403,41 +397,15 @@ struct rcv_desc {
403 (((sts_data) >> 44) & 0x0F) 397 (((sts_data) >> 44) & 0x0F)
404#define netxen_get_sts_pkt_offset(sts_data) \ 398#define netxen_get_sts_pkt_offset(sts_data) \
405 (((sts_data) >> 48) & 0x1F) 399 (((sts_data) >> 48) & 0x1F)
400#define netxen_get_sts_desc_cnt(sts_data) \
401 (((sts_data) >> 53) & 0x7)
406#define netxen_get_sts_opcode(sts_data) \ 402#define netxen_get_sts_opcode(sts_data) \
407 (((sts_data) >> 58) & 0x03F) 403 (((sts_data) >> 58) & 0x03F)
408 404
409struct status_desc { 405struct status_desc {
410 /* Bit pattern: 0-3 port, 4-7 status, 8-11 type, 12-27 total_length 406 __le64 status_desc_data[2];
411 28-43 reference_handle, 44-47 protocol, 48-52 pkt_offset
412 53-55 desc_cnt, 56-57 owner, 58-63 opcode
413 */
414 __le64 status_desc_data;
415 union {
416 struct {
417 __le32 hash_value;
418 u8 hash_type;
419 u8 msg_type;
420 u8 unused;
421 union {
422 /* Bit pattern: 0-6 lro_count indicates frag
423 * sequence, 7 last_frag indicates last frag
424 */
425 u8 lro;
426
427 /* chained buffers */
428 u8 nr_frags;
429 };
430 };
431 struct {
432 __le16 frag_handles[4];
433 };
434 };
435} __attribute__ ((aligned(16))); 407} __attribute__ ((aligned(16)));
436 408
437enum {
438 NETXEN_RCV_PEG_0 = 0,
439 NETXEN_RCV_PEG_1
440};
441/* The version of the main data structure */ 409/* The version of the main data structure */
442#define NETXEN_BDINFO_VERSION 1 410#define NETXEN_BDINFO_VERSION 1
443 411
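The status_desc rework above drops the old union of bit-fields in favour of raw __le64 words plus shift-and-mask accessors, with the bit layout spelled out in the new comment (and a netxen_get_sts_desc_cnt() accessor added for the 53-55 descriptor-count field). A small sketch of decoding such a word, using only the layout from that comment; the demo_* naming and the pr_debug() reporting are illustrative:

#include <linux/kernel.h>
#include <linux/types.h>
#include <asm/byteorder.h>

static void demo_decode_sts(__le64 raw)
{
        u64 sts = le64_to_cpu(raw);
        unsigned int port     = sts & 0x0F;             /* bits 0-3   */
        unsigned int status   = (sts >> 4) & 0x0F;      /* bits 4-7   */
        unsigned int length   = (sts >> 12) & 0xFFFF;   /* bits 12-27 */
        unsigned int handle   = (sts >> 28) & 0xFFFF;   /* bits 28-43 */
        unsigned int desc_cnt = (sts >> 53) & 0x7;      /* bits 53-55 */
        unsigned int opcode   = (sts >> 58) & 0x3F;     /* bits 58-63 */

        pr_debug("port %u status %u len %u handle %u descs %u opcode %#x\n",
                 port, status, length, handle, desc_cnt, opcode);
}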
@@ -447,85 +415,35 @@ enum {
447/* Max number of Gig ports on a Phantom board */ 415/* Max number of Gig ports on a Phantom board */
448#define NETXEN_MAX_PORTS 4 416#define NETXEN_MAX_PORTS 4
449 417
450typedef enum { 418#define NETXEN_BRDTYPE_P1_BD 0x0000
451 NETXEN_BRDTYPE_P1_BD = 0x0000, 419#define NETXEN_BRDTYPE_P1_SB 0x0001
452 NETXEN_BRDTYPE_P1_SB = 0x0001, 420#define NETXEN_BRDTYPE_P1_SMAX 0x0002
453 NETXEN_BRDTYPE_P1_SMAX = 0x0002, 421#define NETXEN_BRDTYPE_P1_SOCK 0x0003
454 NETXEN_BRDTYPE_P1_SOCK = 0x0003, 422
455 423#define NETXEN_BRDTYPE_P2_SOCK_31 0x0008
456 NETXEN_BRDTYPE_P2_SOCK_31 = 0x0008, 424#define NETXEN_BRDTYPE_P2_SOCK_35 0x0009
457 NETXEN_BRDTYPE_P2_SOCK_35 = 0x0009, 425#define NETXEN_BRDTYPE_P2_SB35_4G 0x000a
458 NETXEN_BRDTYPE_P2_SB35_4G = 0x000a, 426#define NETXEN_BRDTYPE_P2_SB31_10G 0x000b
459 NETXEN_BRDTYPE_P2_SB31_10G = 0x000b, 427#define NETXEN_BRDTYPE_P2_SB31_2G 0x000c
460 NETXEN_BRDTYPE_P2_SB31_2G = 0x000c, 428
461 429#define NETXEN_BRDTYPE_P2_SB31_10G_IMEZ 0x000d
462 NETXEN_BRDTYPE_P2_SB31_10G_IMEZ = 0x000d, 430#define NETXEN_BRDTYPE_P2_SB31_10G_HMEZ 0x000e
463 NETXEN_BRDTYPE_P2_SB31_10G_HMEZ = 0x000e, 431#define NETXEN_BRDTYPE_P2_SB31_10G_CX4 0x000f
464 NETXEN_BRDTYPE_P2_SB31_10G_CX4 = 0x000f, 432
465 433#define NETXEN_BRDTYPE_P3_REF_QG 0x0021
466 NETXEN_BRDTYPE_P3_REF_QG = 0x0021, 434#define NETXEN_BRDTYPE_P3_HMEZ 0x0022
467 NETXEN_BRDTYPE_P3_HMEZ = 0x0022, 435#define NETXEN_BRDTYPE_P3_10G_CX4_LP 0x0023
468 NETXEN_BRDTYPE_P3_10G_CX4_LP = 0x0023, 436#define NETXEN_BRDTYPE_P3_4_GB 0x0024
469 NETXEN_BRDTYPE_P3_4_GB = 0x0024, 437#define NETXEN_BRDTYPE_P3_IMEZ 0x0025
470 NETXEN_BRDTYPE_P3_IMEZ = 0x0025, 438#define NETXEN_BRDTYPE_P3_10G_SFP_PLUS 0x0026
471 NETXEN_BRDTYPE_P3_10G_SFP_PLUS = 0x0026, 439#define NETXEN_BRDTYPE_P3_10000_BASE_T 0x0027
472 NETXEN_BRDTYPE_P3_10000_BASE_T = 0x0027, 440#define NETXEN_BRDTYPE_P3_XG_LOM 0x0028
473 NETXEN_BRDTYPE_P3_XG_LOM = 0x0028, 441#define NETXEN_BRDTYPE_P3_4_GB_MM 0x0029
474 NETXEN_BRDTYPE_P3_4_GB_MM = 0x0029, 442#define NETXEN_BRDTYPE_P3_10G_SFP_CT 0x002a
475 NETXEN_BRDTYPE_P3_10G_SFP_CT = 0x002a, 443#define NETXEN_BRDTYPE_P3_10G_SFP_QT 0x002b
476 NETXEN_BRDTYPE_P3_10G_SFP_QT = 0x002b, 444#define NETXEN_BRDTYPE_P3_10G_CX4 0x0031
477 NETXEN_BRDTYPE_P3_10G_CX4 = 0x0031, 445#define NETXEN_BRDTYPE_P3_10G_XFP 0x0032
478 NETXEN_BRDTYPE_P3_10G_XFP = 0x0032, 446#define NETXEN_BRDTYPE_P3_10G_TP 0x0080
479 NETXEN_BRDTYPE_P3_10G_TP = 0x0080
480
481} netxen_brdtype_t;
482
483typedef enum {
484 NETXEN_BRDMFG_INVENTEC = 1
485} netxen_brdmfg;
486
487typedef enum {
488 MEM_ORG_128Mbx4 = 0x0, /* DDR1 only */
489 MEM_ORG_128Mbx8 = 0x1, /* DDR1 only */
490 MEM_ORG_128Mbx16 = 0x2, /* DDR1 only */
491 MEM_ORG_256Mbx4 = 0x3,
492 MEM_ORG_256Mbx8 = 0x4,
493 MEM_ORG_256Mbx16 = 0x5,
494 MEM_ORG_512Mbx4 = 0x6,
495 MEM_ORG_512Mbx8 = 0x7,
496 MEM_ORG_512Mbx16 = 0x8,
497 MEM_ORG_1Gbx4 = 0x9,
498 MEM_ORG_1Gbx8 = 0xa,
499 MEM_ORG_1Gbx16 = 0xb,
500 MEM_ORG_2Gbx4 = 0xc,
501 MEM_ORG_2Gbx8 = 0xd,
502 MEM_ORG_2Gbx16 = 0xe,
503 MEM_ORG_128Mbx32 = 0x10002, /* GDDR only */
504 MEM_ORG_256Mbx32 = 0x10005 /* GDDR only */
505} netxen_mn_mem_org_t;
506
507typedef enum {
508 MEM_ORG_512Kx36 = 0x0,
509 MEM_ORG_1Mx36 = 0x1,
510 MEM_ORG_2Mx36 = 0x2
511} netxen_sn_mem_org_t;
512
513typedef enum {
514 MEM_DEPTH_4MB = 0x1,
515 MEM_DEPTH_8MB = 0x2,
516 MEM_DEPTH_16MB = 0x3,
517 MEM_DEPTH_32MB = 0x4,
518 MEM_DEPTH_64MB = 0x5,
519 MEM_DEPTH_128MB = 0x6,
520 MEM_DEPTH_256MB = 0x7,
521 MEM_DEPTH_512MB = 0x8,
522 MEM_DEPTH_1GB = 0x9,
523 MEM_DEPTH_2GB = 0xa,
524 MEM_DEPTH_4GB = 0xb,
525 MEM_DEPTH_8GB = 0xc,
526 MEM_DEPTH_16GB = 0xd,
527 MEM_DEPTH_32GB = 0xe
528} netxen_mem_depth_t;
529 447
530struct netxen_board_info { 448struct netxen_board_info {
531 u32 header_version; 449 u32 header_version;
@@ -676,17 +594,15 @@ struct netxen_new_user_info {
676#define PRIMARY_IMAGE_BAD 0xffffffff 594#define PRIMARY_IMAGE_BAD 0xffffffff
677 595
678/* Flash memory map */ 596/* Flash memory map */
679typedef enum { 597#define NETXEN_CRBINIT_START 0 /* crbinit section */
680 NETXEN_CRBINIT_START = 0, /* Crbinit section */ 598#define NETXEN_BRDCFG_START 0x4000 /* board config */
681 NETXEN_BRDCFG_START = 0x4000, /* board config */ 599#define NETXEN_INITCODE_START 0x6000 /* pegtune code */
682 NETXEN_INITCODE_START = 0x6000, /* pegtune code */ 600#define NETXEN_BOOTLD_START 0x10000 /* bootld */
683 NETXEN_BOOTLD_START = 0x10000, /* bootld */ 601#define NETXEN_IMAGE_START 0x43000 /* compressed image */
684 NETXEN_IMAGE_START = 0x43000, /* compressed image */ 602#define NETXEN_SECONDARY_START 0x200000 /* backup images */
685 NETXEN_SECONDARY_START = 0x200000, /* backup images */ 603#define NETXEN_PXE_START 0x3E0000 /* PXE boot rom */
686 NETXEN_PXE_START = 0x3E0000, /* user defined region */ 604#define NETXEN_USER_START 0x3E8000 /* Firmare info */
687 NETXEN_USER_START = 0x3E8000, /* User defined region for new boards */ 605#define NETXEN_USER_START 0x3E8000 /* Firmware info */
688 NETXEN_FIXED_START = 0x3F0000 /* backup of crbinit */
689} netxen_flash_map_t;
690 606
691#define NX_FW_VERSION_OFFSET (NETXEN_USER_START+0x408) 607#define NX_FW_VERSION_OFFSET (NETXEN_USER_START+0x408)
692#define NX_FW_SIZE_OFFSET (NETXEN_USER_START+0x40c) 608#define NX_FW_SIZE_OFFSET (NETXEN_USER_START+0x40c)
@@ -708,21 +624,8 @@ typedef enum {
708#define NETXEN_FLASH_SECONDARY_SIZE (NETXEN_USER_START-NETXEN_SECONDARY_START) 624#define NETXEN_FLASH_SECONDARY_SIZE (NETXEN_USER_START-NETXEN_SECONDARY_START)
709#define NETXEN_NUM_PRIMARY_SECTORS (0x20) 625#define NETXEN_NUM_PRIMARY_SECTORS (0x20)
710#define NETXEN_NUM_CONFIG_SECTORS (1) 626#define NETXEN_NUM_CONFIG_SECTORS (1)
711#define PFX "NetXen: "
712extern char netxen_nic_driver_name[]; 627extern char netxen_nic_driver_name[];
713 628
714/* Note: Make sure to not call this before adapter->port is valid */
715#if !defined(NETXEN_DEBUG)
716#define DPRINTK(klevel, fmt, args...) do { \
717 } while (0)
718#else
719#define DPRINTK(klevel, fmt, args...) do { \
720 printk(KERN_##klevel PFX "%s: %s: " fmt, __func__,\
721 (adapter != NULL && adapter->netdev != NULL) ? \
722 adapter->netdev->name : NULL, \
723 ## args); } while(0)
724#endif
725
726/* Number of status descriptors to handle per interrupt */ 629/* Number of status descriptors to handle per interrupt */
727#define MAX_STATUS_HANDLE (64) 630#define MAX_STATUS_HANDLE (64)
728 631
@@ -732,7 +635,7 @@ extern char netxen_nic_driver_name[];
732 */ 635 */
733struct netxen_skb_frag { 636struct netxen_skb_frag {
734 u64 dma; 637 u64 dma;
735 ulong length; 638 u64 length;
736}; 639};
737 640
738#define _netxen_set_bits(config_word, start, bits, val) {\ 641#define _netxen_set_bits(config_word, start, bits, val) {\
@@ -793,34 +696,24 @@ struct netxen_hardware_context {
793 696
794 u8 cut_through; 697 u8 cut_through;
795 u8 revision_id; 698 u8 revision_id;
699 u8 pci_func;
700 u8 linkup;
796 u16 port_type; 701 u16 port_type;
797 int board_type; 702 u16 board_type;
798 u32 linkup;
799 /* Address of cmd ring in Phantom */
800 struct cmd_desc_type0 *cmd_desc_head;
801 dma_addr_t cmd_desc_phys_addr;
802 struct netxen_adapter *adapter;
803 int pci_func;
804}; 703};
805 704
806#define MINIMUM_ETHERNET_FRAME_SIZE 64 /* With FCS */ 705#define MINIMUM_ETHERNET_FRAME_SIZE 64 /* With FCS */
807#define ETHERNET_FCS_SIZE 4 706#define ETHERNET_FCS_SIZE 4
808 707
809struct netxen_adapter_stats { 708struct netxen_adapter_stats {
810 u64 rcvdbadskb;
811 u64 xmitcalled; 709 u64 xmitcalled;
812 u64 xmitedframes;
813 u64 xmitfinished; 710 u64 xmitfinished;
814 u64 badskblen;
815 u64 nocmddescriptor;
816 u64 polled;
817 u64 rxdropped; 711 u64 rxdropped;
818 u64 txdropped; 712 u64 txdropped;
819 u64 csummed; 713 u64 csummed;
820 u64 no_rcv; 714 u64 no_rcv;
821 u64 rxbytes; 715 u64 rxbytes;
822 u64 txbytes; 716 u64 txbytes;
823 u64 ints;
824}; 717};
825 718
826/* 719/*
@@ -852,14 +745,25 @@ struct nx_host_sds_ring {
852 struct napi_struct napi; 745 struct napi_struct napi;
853 struct list_head free_list[NUM_RCV_DESC_RINGS]; 746 struct list_head free_list[NUM_RCV_DESC_RINGS];
854 747
855 u16 clean_tx;
856 u16 post_rxd;
857 int irq; 748 int irq;
858 749
859 dma_addr_t phys_addr; 750 dma_addr_t phys_addr;
860 char name[IFNAMSIZ+4]; 751 char name[IFNAMSIZ+4];
861}; 752};
862 753
754struct nx_host_tx_ring {
755 u32 producer;
756 __le32 *hw_consumer;
757 u32 sw_consumer;
758 u32 crb_cmd_producer;
759 u32 crb_cmd_consumer;
760 u32 num_desc;
761
762 struct netxen_cmd_buffer *cmd_buf_arr;
763 struct cmd_desc_type0 *desc_head;
764 dma_addr_t phys_addr;
765};
766
863/* 767/*
864 * Receive context. There is one such structure per instance of the 768 * Receive context. There is one such structure per instance of the
865 * receive processing. Any state information that is relevant to 769 * receive processing. Any state information that is relevant to
@@ -872,7 +776,7 @@ struct netxen_recv_context {
872 u16 virt_port; 776 u16 virt_port;
873 777
874 struct nx_host_rds_ring rds_rings[NUM_RCV_DESC_RINGS]; 778 struct nx_host_rds_ring rds_rings[NUM_RCV_DESC_RINGS];
875 struct nx_host_sds_ring sds_rings[NUM_STS_DESC_RINGS]; 779 struct nx_host_sds_ring *sds_rings;
876}; 780};
877 781
878/* New HW context creation */ 782/* New HW context creation */
@@ -1154,31 +1058,118 @@ typedef struct {
1154 1058
1155#define NX_MAC_EVENT 0x1 1059#define NX_MAC_EVENT 0x1
1156 1060
1157enum { 1061/*
1158 NX_NIC_H2C_OPCODE_START = 0, 1062 * Driver --> Firmware
1159 NX_NIC_H2C_OPCODE_CONFIG_RSS, 1063 */
1160 NX_NIC_H2C_OPCODE_CONFIG_RSS_TBL, 1064#define NX_NIC_H2C_OPCODE_START 0
1161 NX_NIC_H2C_OPCODE_CONFIG_INTR_COALESCE, 1065#define NX_NIC_H2C_OPCODE_CONFIG_RSS 1
1162 NX_NIC_H2C_OPCODE_CONFIG_LED, 1066#define NX_NIC_H2C_OPCODE_CONFIG_RSS_TBL 2
1163 NX_NIC_H2C_OPCODE_CONFIG_PROMISCUOUS, 1067#define NX_NIC_H2C_OPCODE_CONFIG_INTR_COALESCE 3
1164 NX_NIC_H2C_OPCODE_CONFIG_L2_MAC, 1068#define NX_NIC_H2C_OPCODE_CONFIG_LED 4
1165 NX_NIC_H2C_OPCODE_LRO_REQUEST, 1069#define NX_NIC_H2C_OPCODE_CONFIG_PROMISCUOUS 5
1166 NX_NIC_H2C_OPCODE_GET_SNMP_STATS, 1070#define NX_NIC_H2C_OPCODE_CONFIG_L2_MAC 6
1167 NX_NIC_H2C_OPCODE_PROXY_START_REQUEST, 1071#define NX_NIC_H2C_OPCODE_LRO_REQUEST 7
1168 NX_NIC_H2C_OPCODE_PROXY_STOP_REQUEST, 1072#define NX_NIC_H2C_OPCODE_GET_SNMP_STATS 8
1169 NX_NIC_H2C_OPCODE_PROXY_SET_MTU, 1073#define NX_NIC_H2C_OPCODE_PROXY_START_REQUEST 9
1170 NX_NIC_H2C_OPCODE_PROXY_SET_VPORT_MISS_MODE, 1074#define NX_NIC_H2C_OPCODE_PROXY_STOP_REQUEST 10
1171 NX_H2P_OPCODE_GET_FINGER_PRINT_REQUEST, 1075#define NX_NIC_H2C_OPCODE_PROXY_SET_MTU 11
1172 NX_H2P_OPCODE_INSTALL_LICENSE_REQUEST, 1076#define NX_NIC_H2C_OPCODE_PROXY_SET_VPORT_MISS_MODE 12
1173 NX_H2P_OPCODE_GET_LICENSE_CAPABILITY_REQUEST, 1077#define NX_NIC_H2C_OPCODE_GET_FINGER_PRINT_REQUEST 13
1174 NX_NIC_H2C_OPCODE_GET_NET_STATS, 1078#define NX_NIC_H2C_OPCODE_INSTALL_LICENSE_REQUEST 14
1175 NX_NIC_H2C_OPCODE_LAST 1079#define NX_NIC_H2C_OPCODE_GET_LICENSE_CAPABILITY_REQUEST 15
1176}; 1080#define NX_NIC_H2C_OPCODE_GET_NET_STATS 16
1081#define NX_NIC_H2C_OPCODE_PROXY_UPDATE_P2V 17
1082#define NX_NIC_H2C_OPCODE_CONFIG_IPADDR 18
1083#define NX_NIC_H2C_OPCODE_CONFIG_LOOPBACK 19
1084#define NX_NIC_H2C_OPCODE_PROXY_STOP_DONE 20
1085#define NX_NIC_H2C_OPCODE_GET_LINKEVENT 21
1086#define NX_NIC_C2C_OPCODE 22
1087#define NX_NIC_H2C_OPCODE_LAST 23
1088
1089/*
1090 * Firmware --> Driver
1091 */
1092
1093#define NX_NIC_C2H_OPCODE_START 128
1094#define NX_NIC_C2H_OPCODE_CONFIG_RSS_RESPONSE 129
1095#define NX_NIC_C2H_OPCODE_CONFIG_RSS_TBL_RESPONSE 130
1096#define NX_NIC_C2H_OPCODE_CONFIG_MAC_RESPONSE 131
1097#define NX_NIC_C2H_OPCODE_CONFIG_PROMISCUOUS_RESPONSE 132
1098#define NX_NIC_C2H_OPCODE_CONFIG_L2_MAC_RESPONSE 133
1099#define NX_NIC_C2H_OPCODE_LRO_DELETE_RESPONSE 134
1100#define NX_NIC_C2H_OPCODE_LRO_ADD_FAILURE_RESPONSE 135
1101#define NX_NIC_C2H_OPCODE_GET_SNMP_STATS 136
1102#define NX_NIC_C2H_OPCODE_GET_FINGER_PRINT_REPLY 137
1103#define NX_NIC_C2H_OPCODE_INSTALL_LICENSE_REPLY 138
1104#define NX_NIC_C2H_OPCODE_GET_LICENSE_CAPABILITIES_REPLY 139
1105#define NX_NIC_C2H_OPCODE_GET_NET_STATS_RESPONSE 140
1106#define NX_NIC_C2H_OPCODE_GET_LINKEVENT_RESPONSE 141
1107#define NX_NIC_C2H_OPCODE_LAST 142
1177 1108
1178#define VPORT_MISS_MODE_DROP 0 /* drop all unmatched */ 1109#define VPORT_MISS_MODE_DROP 0 /* drop all unmatched */
1179#define VPORT_MISS_MODE_ACCEPT_ALL 1 /* accept all packets */ 1110#define VPORT_MISS_MODE_ACCEPT_ALL 1 /* accept all packets */
1180#define VPORT_MISS_MODE_ACCEPT_MULTI 2 /* accept unmatched multicast */ 1111#define VPORT_MISS_MODE_ACCEPT_MULTI 2 /* accept unmatched multicast */
1181 1112
1113#define NX_FW_CAPABILITY_LINK_NOTIFICATION (1 << 5)
1114#define NX_FW_CAPABILITY_SWITCHING (1 << 6)
1115
1116/* module types */
1117#define LINKEVENT_MODULE_NOT_PRESENT 1
1118#define LINKEVENT_MODULE_OPTICAL_UNKNOWN 2
1119#define LINKEVENT_MODULE_OPTICAL_SRLR 3
1120#define LINKEVENT_MODULE_OPTICAL_LRM 4
1121#define LINKEVENT_MODULE_OPTICAL_SFP_1G 5
1122#define LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLE 6
1123#define LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLELEN 7
1124#define LINKEVENT_MODULE_TWINAX 8
1125
1126#define LINKSPEED_10GBPS 10000
1127#define LINKSPEED_1GBPS 1000
1128#define LINKSPEED_100MBPS 100
1129#define LINKSPEED_10MBPS 10
1130
1131#define LINKSPEED_ENCODED_10MBPS 0
1132#define LINKSPEED_ENCODED_100MBPS 1
1133#define LINKSPEED_ENCODED_1GBPS 2
1134
1135#define LINKEVENT_AUTONEG_DISABLED 0
1136#define LINKEVENT_AUTONEG_ENABLED 1
1137
1138#define LINKEVENT_HALF_DUPLEX 0
1139#define LINKEVENT_FULL_DUPLEX 1
1140
1141#define LINKEVENT_LINKSPEED_MBPS 0
1142#define LINKEVENT_LINKSPEED_ENCODED 1
1143
1144/* firmware response header:
1145 * 63:58 - message type
1146 * 57:56 - owner
1147 * 55:53 - desc count
1148 * 52:48 - reserved
1149 * 47:40 - completion id
1150 * 39:32 - opcode
1151 * 31:16 - error code
1152 * 15:00 - reserved
1153 */
1154#define netxen_get_nic_msgtype(msg_hdr) \
1155 ((msg_hdr >> 58) & 0x3F)
1156#define netxen_get_nic_msg_compid(msg_hdr) \
1157 ((msg_hdr >> 40) & 0xFF)
1158#define netxen_get_nic_msg_opcode(msg_hdr) \
1159 ((msg_hdr >> 32) & 0xFF)
1160#define netxen_get_nic_msg_errcode(msg_hdr) \
1161 ((msg_hdr >> 16) & 0xFFFF)
1162
1163typedef struct {
1164 union {
1165 struct {
1166 u64 hdr;
1167 u64 body[7];
1168 };
1169 u64 words[8];
1170 };
1171} nx_fw_msg_t;
1172
1182typedef struct { 1173typedef struct {
1183 __le64 qhdr; 1174 __le64 qhdr;
1184 __le64 req_hdr; 1175 __le64 req_hdr;
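The block above documents the firmware response header layout, adds matching netxen_get_nic_msg_*() accessors and an nx_fw_msg_t container, and turns the driver-to-firmware and firmware-to-driver opcodes into plain defines. A minimal sketch of consuming such a message, with demo_* stand-ins; only the bit positions and the opcode value come from the text above, the handler itself is invented:

#include <linux/types.h>
#include <linux/errno.h>

struct demo_fw_msg {
        u64 hdr;
        u64 body[7];
};

#define DEMO_C2H_GET_LINKEVENT_RESPONSE 141     /* value from the table above */

static int demo_handle_fw_msg(const struct demo_fw_msg *msg)
{
        unsigned int opcode  = (msg->hdr >> 32) & 0xFF;         /* bits 39:32 */
        unsigned int errcode = (msg->hdr >> 16) & 0xFFFF;       /* bits 31:16 */

        if (errcode)
                return -EIO;

        switch (opcode) {
        case DEMO_C2H_GET_LINKEVENT_RESPONSE:
                /* a real driver would parse the link state out of body[] */
                return 0;
        default:
                return -EINVAL;
        }
}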
@@ -1218,78 +1209,60 @@ struct netxen_adapter {
1218 1209
1219 struct net_device *netdev; 1210 struct net_device *netdev;
1220 struct pci_dev *pdev; 1211 struct pci_dev *pdev;
1221 int pci_using_dac;
1222 struct net_device_stats net_stats;
1223 int mtu;
1224 int portnum;
1225 u8 physical_port;
1226 u16 tx_context_id;
1227
1228 uint8_t mc_enabled;
1229 uint8_t max_mc_count;
1230 nx_mac_list_t *mac_list; 1212 nx_mac_list_t *mac_list;
1231 1213
1232 struct netxen_legacy_intr_set legacy_intr;
1233
1234 struct work_struct watchdog_task;
1235 struct timer_list watchdog_timer;
1236 struct work_struct tx_timeout_task;
1237
1238 u32 curr_window; 1214 u32 curr_window;
1239 u32 crb_win; 1215 u32 crb_win;
1240 rwlock_t adapter_lock; 1216 rwlock_t adapter_lock;
1241 1217
1242 u32 cmd_producer;
1243 __le32 *cmd_consumer;
1244 u32 last_cmd_consumer;
1245 u32 crb_addr_cmd_producer;
1246 u32 crb_addr_cmd_consumer;
1247 spinlock_t tx_clean_lock; 1218 spinlock_t tx_clean_lock;
1248 1219
1249 u32 num_txd; 1220 u16 num_txd;
1250 u32 num_rxd; 1221 u16 num_rxd;
1251 u32 num_jumbo_rxd; 1222 u16 num_jumbo_rxd;
1252 u32 num_lro_rxd; 1223 u16 num_lro_rxd;
1224
1225 u8 max_rds_rings;
1226 u8 max_sds_rings;
1227 u8 driver_mismatch;
1228 u8 msix_supported;
1229 u8 rx_csum;
1230 u8 pci_using_dac;
1231 u8 portnum;
1232 u8 physical_port;
1233
1234 u8 mc_enabled;
1235 u8 max_mc_count;
1236 u8 rss_supported;
1237 u8 resv2;
1238 u32 resv3;
1253 1239
1254 int max_rds_rings; 1240 u8 has_link_events;
1255 int max_sds_rings; 1241 u8 resv1;
1242 u16 tx_context_id;
1243 u16 mtu;
1244 u16 is_up;
1245
1246 u16 link_speed;
1247 u16 link_duplex;
1248 u16 link_autoneg;
1249 u16 module_type;
1256 1250
1251 u32 capabilities;
1257 u32 flags; 1252 u32 flags;
1258 u32 irq; 1253 u32 irq;
1259 int driver_mismatch;
1260 u32 temp; 1254 u32 temp;
1261
1262 u32 fw_major; 1255 u32 fw_major;
1263 u32 fw_version; 1256 u32 fw_version;
1264 1257
1265 int msix_supported;
1266 struct msix_entry msix_entries[MSIX_ENTRIES_PER_ADAPTER];
1267
1268 struct netxen_adapter_stats stats; 1258 struct netxen_adapter_stats stats;
1269 1259
1270 u16 link_speed;
1271 u16 link_duplex;
1272 u16 state;
1273 u16 link_autoneg;
1274 int rx_csum;
1275
1276 struct netxen_cmd_buffer *cmd_buf_arr; /* Command buffers for xmit */
1277
1278 /*
1279 * Receive instances. These can be either one per port,
1280 * or one per peg, etc.
1281 */
1282 struct netxen_recv_context recv_ctx; 1260 struct netxen_recv_context recv_ctx;
1283 1261 struct nx_host_tx_ring tx_ring;
1284 int is_up;
1285 struct netxen_dummy_dma dummy_dma;
1286 nx_nic_intr_coalesce_t coal;
1287 1262
1288 /* Context interface shared between card and host */ 1263 /* Context interface shared between card and host */
1289 struct netxen_ring_ctx *ctx_desc; 1264 struct netxen_ring_ctx *ctx_desc;
1290 dma_addr_t ctx_desc_phys_addr; 1265 dma_addr_t ctx_desc_phys_addr;
1291 int intr_scheme;
1292 int msi_mode;
1293 int (*enable_phy_interrupts) (struct netxen_adapter *); 1266 int (*enable_phy_interrupts) (struct netxen_adapter *);
1294 int (*disable_phy_interrupts) (struct netxen_adapter *); 1267 int (*disable_phy_interrupts) (struct netxen_adapter *);
1295 int (*macaddr_set) (struct netxen_adapter *, netxen_ethernet_macaddr_t); 1268 int (*macaddr_set) (struct netxen_adapter *, netxen_ethernet_macaddr_t);
@@ -1300,17 +1273,29 @@ struct netxen_adapter {
1300 int (*init_port) (struct netxen_adapter *, int); 1273 int (*init_port) (struct netxen_adapter *, int);
1301 int (*stop_port) (struct netxen_adapter *); 1274 int (*stop_port) (struct netxen_adapter *);
1302 1275
1303 int (*hw_read_wx)(struct netxen_adapter *, ulong, void *, int); 1276 u32 (*hw_read_wx)(struct netxen_adapter *, ulong);
1304 int (*hw_write_wx)(struct netxen_adapter *, ulong, void *, int); 1277 int (*hw_write_wx)(struct netxen_adapter *, ulong, u32);
1305 int (*pci_mem_read)(struct netxen_adapter *, u64, void *, int); 1278 int (*pci_mem_read)(struct netxen_adapter *, u64, void *, int);
1306 int (*pci_mem_write)(struct netxen_adapter *, u64, void *, int); 1279 int (*pci_mem_write)(struct netxen_adapter *, u64, void *, int);
1307 int (*pci_write_immediate)(struct netxen_adapter *, u64, u32); 1280 int (*pci_write_immediate)(struct netxen_adapter *, u64, u32);
1308 u32 (*pci_read_immediate)(struct netxen_adapter *, u64); 1281 u32 (*pci_read_immediate)(struct netxen_adapter *, u64);
1309 void (*pci_write_normalize)(struct netxen_adapter *, u64, u32);
1310 u32 (*pci_read_normalize)(struct netxen_adapter *, u64);
1311 unsigned long (*pci_set_window)(struct netxen_adapter *, 1282 unsigned long (*pci_set_window)(struct netxen_adapter *,
1312 unsigned long long); 1283 unsigned long long);
1313}; /* netxen_adapter structure */ 1284
1285 struct netxen_legacy_intr_set legacy_intr;
1286
1287 struct msix_entry msix_entries[MSIX_ENTRIES_PER_ADAPTER];
1288
1289 struct netxen_dummy_dma dummy_dma;
1290
1291 struct work_struct watchdog_task;
1292 struct timer_list watchdog_timer;
1293 struct work_struct tx_timeout_task;
1294
1295 struct net_device_stats net_stats;
1296
1297 nx_nic_intr_coalesce_t coal;
1298};
1314 1299
1315/* 1300/*
1316 * NetXen dma watchdog control structure 1301 * NetXen dma watchdog control structure
@@ -1330,46 +1315,6 @@ struct netxen_adapter {
1330#define netxen_get_dma_watchdog_disabled(config_word) \ 1315#define netxen_get_dma_watchdog_disabled(config_word) \
1331 (((config_word) >> 1) & 0x1) 1316 (((config_word) >> 1) & 0x1)
1332 1317
1333/* Max number of xmit producer threads that can run simultaneously */
1334#define MAX_XMIT_PRODUCERS 16
1335
1336#define PCI_OFFSET_FIRST_RANGE(adapter, off) \
1337 ((adapter)->ahw.pci_base0 + (off))
1338#define PCI_OFFSET_SECOND_RANGE(adapter, off) \
1339 ((adapter)->ahw.pci_base1 + (off) - SECOND_PAGE_GROUP_START)
1340#define PCI_OFFSET_THIRD_RANGE(adapter, off) \
1341 ((adapter)->ahw.pci_base2 + (off) - THIRD_PAGE_GROUP_START)
1342
1343static inline void __iomem *pci_base_offset(struct netxen_adapter *adapter,
1344 unsigned long off)
1345{
1346 if ((off < FIRST_PAGE_GROUP_END) && (off >= FIRST_PAGE_GROUP_START)) {
1347 return (adapter->ahw.pci_base0 + off);
1348 } else if ((off < SECOND_PAGE_GROUP_END) &&
1349 (off >= SECOND_PAGE_GROUP_START)) {
1350 return (adapter->ahw.pci_base1 + off - SECOND_PAGE_GROUP_START);
1351 } else if ((off < THIRD_PAGE_GROUP_END) &&
1352 (off >= THIRD_PAGE_GROUP_START)) {
1353 return (adapter->ahw.pci_base2 + off - THIRD_PAGE_GROUP_START);
1354 }
1355 return NULL;
1356}
1357
1358static inline void __iomem *pci_base(struct netxen_adapter *adapter,
1359 unsigned long off)
1360{
1361 if ((off < FIRST_PAGE_GROUP_END) && (off >= FIRST_PAGE_GROUP_START)) {
1362 return adapter->ahw.pci_base0;
1363 } else if ((off < SECOND_PAGE_GROUP_END) &&
1364 (off >= SECOND_PAGE_GROUP_START)) {
1365 return adapter->ahw.pci_base1;
1366 } else if ((off < THIRD_PAGE_GROUP_END) &&
1367 (off >= THIRD_PAGE_GROUP_START)) {
1368 return adapter->ahw.pci_base2;
1369 }
1370 return NULL;
1371}
1372
1373int netxen_niu_xgbe_enable_phy_interrupts(struct netxen_adapter *adapter); 1318int netxen_niu_xgbe_enable_phy_interrupts(struct netxen_adapter *adapter);
1374int netxen_niu_gbe_enable_phy_interrupts(struct netxen_adapter *adapter); 1319int netxen_niu_gbe_enable_phy_interrupts(struct netxen_adapter *adapter);
1375int netxen_niu_xgbe_disable_phy_interrupts(struct netxen_adapter *adapter); 1320int netxen_niu_xgbe_disable_phy_interrupts(struct netxen_adapter *adapter);
@@ -1382,21 +1327,19 @@ int netxen_niu_gbe_phy_write(struct netxen_adapter *adapter,
1382/* Functions available from netxen_nic_hw.c */ 1327/* Functions available from netxen_nic_hw.c */
1383int netxen_nic_set_mtu_xgb(struct netxen_adapter *adapter, int new_mtu); 1328int netxen_nic_set_mtu_xgb(struct netxen_adapter *adapter, int new_mtu);
1384int netxen_nic_set_mtu_gb(struct netxen_adapter *adapter, int new_mtu); 1329int netxen_nic_set_mtu_gb(struct netxen_adapter *adapter, int new_mtu);
1385void netxen_nic_reg_write(struct netxen_adapter *adapter, u64 off, u32 val); 1330
1386int netxen_nic_reg_read(struct netxen_adapter *adapter, u64 off); 1331#define NXRD32(adapter, off) \
1387void netxen_nic_write_w0(struct netxen_adapter *adapter, u32 index, u32 value); 1332 (adapter->hw_read_wx(adapter, off))
1388void netxen_nic_read_w0(struct netxen_adapter *adapter, u32 index, u32 *value); 1333#define NXWR32(adapter, off, val) \
1389void netxen_nic_write_w1(struct netxen_adapter *adapter, u32 index, u32 value); 1334 (adapter->hw_write_wx(adapter, off, val))
1390void netxen_nic_read_w1(struct netxen_adapter *adapter, u32 index, u32 *value);
1391 1335
1392int netxen_nic_get_board_info(struct netxen_adapter *adapter); 1336int netxen_nic_get_board_info(struct netxen_adapter *adapter);
1393void netxen_nic_get_firmware_info(struct netxen_adapter *adapter); 1337void netxen_nic_get_firmware_info(struct netxen_adapter *adapter);
1394int netxen_nic_wol_supported(struct netxen_adapter *adapter); 1338int netxen_nic_wol_supported(struct netxen_adapter *adapter);
1395 1339
1396int netxen_nic_hw_read_wx_128M(struct netxen_adapter *adapter, 1340u32 netxen_nic_hw_read_wx_128M(struct netxen_adapter *adapter, ulong off);
1397 ulong off, void *data, int len);
1398int netxen_nic_hw_write_wx_128M(struct netxen_adapter *adapter, 1341int netxen_nic_hw_write_wx_128M(struct netxen_adapter *adapter,
1399 ulong off, void *data, int len); 1342 ulong off, u32 data);
1400int netxen_nic_pci_mem_read_128M(struct netxen_adapter *adapter, 1343int netxen_nic_pci_mem_read_128M(struct netxen_adapter *adapter,
1401 u64 off, void *data, int size); 1344 u64 off, void *data, int size);
1402int netxen_nic_pci_mem_write_128M(struct netxen_adapter *adapter, 1345int netxen_nic_pci_mem_write_128M(struct netxen_adapter *adapter,
@@ -1412,16 +1355,13 @@ unsigned long netxen_nic_pci_set_window_128M(struct netxen_adapter *adapter,
1412void netxen_nic_pci_change_crbwindow_128M(struct netxen_adapter *adapter, 1355void netxen_nic_pci_change_crbwindow_128M(struct netxen_adapter *adapter,
1413 u32 wndw); 1356 u32 wndw);
1414 1357
1415int netxen_nic_hw_read_wx_2M(struct netxen_adapter *adapter, 1358u32 netxen_nic_hw_read_wx_2M(struct netxen_adapter *adapter, ulong off);
1416 ulong off, void *data, int len);
1417int netxen_nic_hw_write_wx_2M(struct netxen_adapter *adapter, 1359int netxen_nic_hw_write_wx_2M(struct netxen_adapter *adapter,
1418 ulong off, void *data, int len); 1360 ulong off, u32 data);
1419int netxen_nic_pci_mem_read_2M(struct netxen_adapter *adapter, 1361int netxen_nic_pci_mem_read_2M(struct netxen_adapter *adapter,
1420 u64 off, void *data, int size); 1362 u64 off, void *data, int size);
1421int netxen_nic_pci_mem_write_2M(struct netxen_adapter *adapter, 1363int netxen_nic_pci_mem_write_2M(struct netxen_adapter *adapter,
1422 u64 off, void *data, int size); 1364 u64 off, void *data, int size);
1423void netxen_crb_writelit_adapter(struct netxen_adapter *adapter,
1424 unsigned long off, int data);
1425int netxen_nic_pci_write_immediate_2M(struct netxen_adapter *adapter, 1365int netxen_nic_pci_write_immediate_2M(struct netxen_adapter *adapter,
1426 u64 off, u32 data); 1366 u64 off, u32 data);
1427u32 netxen_nic_pci_read_immediate_2M(struct netxen_adapter *adapter, u64 off); 1367u32 netxen_nic_pci_read_immediate_2M(struct netxen_adapter *adapter, u64 off);
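The two hunks above, together with the NXRD32()/NXWR32() macros defined just before them, change the register accessors from the old buffer-based hw_read_wx(adapter, off, &val, 4) style to value-returning 32-bit reads and direct 32-bit writes. The read-modify-write shape that results is the same one used by dma_watchdog_shutdown_request() further down; a sketch with a stripped-down demo_adapter standing in for struct netxen_adapter:

#include <linux/types.h>

struct demo_adapter {
        u32 (*hw_read_wx)(struct demo_adapter *adapter, unsigned long off);
        int (*hw_write_wx)(struct demo_adapter *adapter, unsigned long off,
                           u32 val);
};

#define DEMO_RD32(adapter, off)         ((adapter)->hw_read_wx(adapter, off))
#define DEMO_WR32(adapter, off, val)    ((adapter)->hw_write_wx(adapter, off, val))

/* Read-modify-write without the old scratch buffer and length argument. */
static void demo_set_disable_bit(struct demo_adapter *adapter,
                                 unsigned long off)
{
        u32 ctrl = DEMO_RD32(adapter, off);

        DEMO_WR32(adapter, off, ctrl | 0x2);    /* e.g. a "disable request" bit */
}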
@@ -1435,7 +1375,6 @@ unsigned long netxen_nic_pci_set_window_2M(struct netxen_adapter *adapter,
1435void netxen_free_adapter_offload(struct netxen_adapter *adapter); 1375void netxen_free_adapter_offload(struct netxen_adapter *adapter);
1436int netxen_initialize_adapter_offload(struct netxen_adapter *adapter); 1376int netxen_initialize_adapter_offload(struct netxen_adapter *adapter);
1437int netxen_phantom_init(struct netxen_adapter *adapter, int pegtune_val); 1377int netxen_phantom_init(struct netxen_adapter *adapter, int pegtune_val);
1438int netxen_receive_peg_ready(struct netxen_adapter *adapter);
1439int netxen_load_firmware(struct netxen_adapter *adapter); 1378int netxen_load_firmware(struct netxen_adapter *adapter);
1440int netxen_pinit_from_rom(struct netxen_adapter *adapter, int verbose); 1379int netxen_pinit_from_rom(struct netxen_adapter *adapter, int verbose);
1441 1380
@@ -1475,6 +1414,8 @@ void netxen_p3_free_mac_list(struct netxen_adapter *adapter);
1475int netxen_p3_nic_set_promisc(struct netxen_adapter *adapter, u32); 1414int netxen_p3_nic_set_promisc(struct netxen_adapter *adapter, u32);
1476int netxen_config_intr_coalesce(struct netxen_adapter *adapter); 1415int netxen_config_intr_coalesce(struct netxen_adapter *adapter);
1477int netxen_config_rss(struct netxen_adapter *adapter, int enable); 1416int netxen_config_rss(struct netxen_adapter *adapter, int enable);
1417int netxen_linkevent_request(struct netxen_adapter *adapter, int enable);
1418void netxen_advert_link_change(struct netxen_adapter *adapter, int linkup);
1478 1419
1479int nx_fw_cmd_set_mtu(struct netxen_adapter *adapter, int mtu); 1420int nx_fw_cmd_set_mtu(struct netxen_adapter *adapter, int mtu);
1480int netxen_nic_change_mtu(struct net_device *netdev, int new_mtu); 1421int netxen_nic_change_mtu(struct net_device *netdev, int new_mtu);
@@ -1483,7 +1424,7 @@ int netxen_nic_set_mac(struct net_device *netdev, void *p);
1483struct net_device_stats *netxen_nic_get_stats(struct net_device *netdev); 1424struct net_device_stats *netxen_nic_get_stats(struct net_device *netdev);
1484 1425
1485void netxen_nic_update_cmd_producer(struct netxen_adapter *adapter, 1426void netxen_nic_update_cmd_producer(struct netxen_adapter *adapter,
1486 uint32_t crb_producer); 1427 struct nx_host_tx_ring *tx_ring, uint32_t crb_producer);
1487 1428
1488/* 1429/*
1489 * NetXen Board information 1430 * NetXen Board information
@@ -1491,7 +1432,7 @@ void netxen_nic_update_cmd_producer(struct netxen_adapter *adapter,
1491 1432
1492#define NETXEN_MAX_SHORT_NAME 32 1433#define NETXEN_MAX_SHORT_NAME 32
1493struct netxen_brdinfo { 1434struct netxen_brdinfo {
1494 netxen_brdtype_t brdtype; /* type of board */ 1435 int brdtype; /* type of board */
1495 long ports; /* max no of physical ports */ 1436 long ports; /* max no of physical ports */
1496 char short_name[NETXEN_MAX_SHORT_NAME]; 1437 char short_name[NETXEN_MAX_SHORT_NAME];
1497}; 1438};
@@ -1541,17 +1482,15 @@ dma_watchdog_shutdown_request(struct netxen_adapter *adapter)
1541 u32 ctrl; 1482 u32 ctrl;
1542 1483
1543 /* check if already inactive */ 1484 /* check if already inactive */
1544 if (adapter->hw_read_wx(adapter, 1485 ctrl = adapter->hw_read_wx(adapter,
1545 NETXEN_CAM_RAM(NETXEN_CAM_RAM_DMA_WATCHDOG_CTRL), &ctrl, 4)) 1486 NETXEN_CAM_RAM(NETXEN_CAM_RAM_DMA_WATCHDOG_CTRL));
1546 printk(KERN_ERR "failed to read dma watchdog status\n");
1547 1487
1548 if (netxen_get_dma_watchdog_enabled(ctrl) == 0) 1488 if (netxen_get_dma_watchdog_enabled(ctrl) == 0)
1549 return 1; 1489 return 1;
1550 1490
1551 /* Send the disable request */ 1491 /* Send the disable request */
1552 netxen_set_dma_watchdog_disable_req(ctrl); 1492 netxen_set_dma_watchdog_disable_req(ctrl);
1553 netxen_crb_writelit_adapter(adapter, 1493 NXWR32(adapter, NETXEN_CAM_RAM(NETXEN_CAM_RAM_DMA_WATCHDOG_CTRL), ctrl);
1554 NETXEN_CAM_RAM(NETXEN_CAM_RAM_DMA_WATCHDOG_CTRL), ctrl);
1555 1494
1556 return 0; 1495 return 0;
1557} 1496}
@@ -1561,9 +1500,8 @@ dma_watchdog_shutdown_poll_result(struct netxen_adapter *adapter)
1561{ 1500{
1562 u32 ctrl; 1501 u32 ctrl;
1563 1502
1564 if (adapter->hw_read_wx(adapter, 1503 ctrl = adapter->hw_read_wx(adapter,
1565 NETXEN_CAM_RAM(NETXEN_CAM_RAM_DMA_WATCHDOG_CTRL), &ctrl, 4)) 1504 NETXEN_CAM_RAM(NETXEN_CAM_RAM_DMA_WATCHDOG_CTRL));
1566 printk(KERN_ERR "failed to read dma watchdog status\n");
1567 1505
1568 return (netxen_get_dma_watchdog_enabled(ctrl) == 0); 1506 return (netxen_get_dma_watchdog_enabled(ctrl) == 0);
1569} 1507}
@@ -1573,9 +1511,8 @@ dma_watchdog_wakeup(struct netxen_adapter *adapter)
1573{ 1511{
1574 u32 ctrl; 1512 u32 ctrl;
1575 1513
1576 if (adapter->hw_read_wx(adapter, 1514 ctrl = adapter->hw_read_wx(adapter,
1577 NETXEN_CAM_RAM(NETXEN_CAM_RAM_DMA_WATCHDOG_CTRL), &ctrl, 4)) 1515 NETXEN_CAM_RAM(NETXEN_CAM_RAM_DMA_WATCHDOG_CTRL));
1578 printk(KERN_ERR "failed to read dma watchdog status\n");
1579 1516
1580 if (netxen_get_dma_watchdog_enabled(ctrl)) 1517 if (netxen_get_dma_watchdog_enabled(ctrl))
1581 return 1; 1518 return 1;
@@ -1583,8 +1520,7 @@ dma_watchdog_wakeup(struct netxen_adapter *adapter)
1583 /* send the wakeup request */ 1520 /* send the wakeup request */
1584 netxen_set_dma_watchdog_enable_req(ctrl); 1521 netxen_set_dma_watchdog_enable_req(ctrl);
1585 1522
1586 netxen_crb_writelit_adapter(adapter, 1523 NXWR32(adapter, NETXEN_CAM_RAM(NETXEN_CAM_RAM_DMA_WATCHDOG_CTRL), ctrl);
1587 NETXEN_CAM_RAM(NETXEN_CAM_RAM_DMA_WATCHDOG_CTRL), ctrl);
1588 1524
1589 return 0; 1525 return 0;
1590} 1526}
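The header now routes all CRB accesses through the adapter's hw_read_wx()/hw_write_wx() hooks via the NXRD32()/NXWR32() macros, so helpers such as the dma-watchdog inlines become plain read-modify-write sequences with no error path on the read. A small userspace model of that accessor pattern follows; only NXRD32/NXWR32 mirror the macros above, everything else (the regs[] array, fake_read/fake_write, the WDOG_* values) is a hypothetical stand-in:

/* Userspace model of the NXRD32()/NXWR32() accessor pattern; the
 * "registers" are just an array, and fake_read/fake_write are
 * hypothetical stand-ins for the per-chip hw_read_wx/hw_write_wx hooks. */
#include <stdio.h>
#include <stdint.h>

struct adapter {
	uint32_t regs[16];                       /* fake CRB space */
	uint32_t (*hw_read_wx)(struct adapter *, unsigned long);
	int (*hw_write_wx)(struct adapter *, unsigned long, uint32_t);
};

#define NXRD32(adapter, off)       ((adapter)->hw_read_wx(adapter, off))
#define NXWR32(adapter, off, val)  ((adapter)->hw_write_wx(adapter, off, val))

static uint32_t fake_read(struct adapter *a, unsigned long off)
{
	return a->regs[off];
}

static int fake_write(struct adapter *a, unsigned long off, uint32_t val)
{
	a->regs[off] = val;
	return 0;
}

#define WDOG_CTRL   3            /* hypothetical watchdog control offset */
#define WDOG_ENABLE 0x1

int main(void)
{
	struct adapter a = { .hw_read_wx = fake_read, .hw_write_wx = fake_write };
	uint32_t ctrl;

	a.regs[WDOG_CTRL] = WDOG_ENABLE;

	/* read-modify-write, as in dma_watchdog_shutdown_request() */
	ctrl = NXRD32(&a, WDOG_CTRL);
	ctrl &= ~WDOG_ENABLE;                    /* request disable */
	NXWR32(&a, WDOG_CTRL, ctrl);

	printf("ctrl=0x%x\n", NXRD32(&a, WDOG_CTRL));
	return 0;
}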
diff --git a/drivers/net/netxen/netxen_nic_ctx.c b/drivers/net/netxen/netxen_nic_ctx.c
index 9234473bc08a..fd82adf4f876 100644
--- a/drivers/net/netxen/netxen_nic_ctx.c
+++ b/drivers/net/netxen/netxen_nic_ctx.c
@@ -41,8 +41,7 @@ netxen_api_lock(struct netxen_adapter *adapter)
41 41
42 for (;;) { 42 for (;;) {
43 /* Acquire PCIE HW semaphore5 */ 43 /* Acquire PCIE HW semaphore5 */
44 netxen_nic_read_w0(adapter, 44 done = NXRD32(adapter, NETXEN_PCIE_REG(PCIE_SEM5_LOCK));
45 NETXEN_PCIE_REG(PCIE_SEM5_LOCK), &done);
46 45
47 if (done == 1) 46 if (done == 1)
48 break; 47 break;
@@ -56,7 +55,7 @@ netxen_api_lock(struct netxen_adapter *adapter)
56 } 55 }
57 56
58#if 0 57#if 0
59 netxen_nic_write_w1(adapter, 58 NXWR32(adapter,
60 NETXEN_API_LOCK_ID, NX_OS_API_LOCK_DRIVER); 59 NETXEN_API_LOCK_ID, NX_OS_API_LOCK_DRIVER);
61#endif 60#endif
62 return 0; 61 return 0;
@@ -65,11 +64,8 @@ netxen_api_lock(struct netxen_adapter *adapter)
65static int 64static int
66netxen_api_unlock(struct netxen_adapter *adapter) 65netxen_api_unlock(struct netxen_adapter *adapter)
67{ 66{
68 u32 val;
69
70 /* Release PCIE HW semaphore5 */ 67 /* Release PCIE HW semaphore5 */
71 netxen_nic_read_w0(adapter, 68 NXRD32(adapter, NETXEN_PCIE_REG(PCIE_SEM5_UNLOCK));
72 NETXEN_PCIE_REG(PCIE_SEM5_UNLOCK), &val);
73 return 0; 69 return 0;
74} 70}
75 71
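netxen_api_lock() above relies on the PCIE block's hardware semaphore 5: a read of the LOCK register returns 1 exactly once to the winner, and a later read of the UNLOCK register releases it. A standalone sketch of that read-to-acquire loop, where sem_read() is a hypothetical stub for the register read and a bounded retry count replaces the driver's own loop bound:

/* Sketch of the read-to-acquire hardware semaphore pattern used by
 * netxen_api_lock()/netxen_api_unlock(). */
#include <stdio.h>
#include <unistd.h>

#define RETRIES 100

static int locked;                      /* pretend hardware state */

static unsigned int sem_read(int unlock)
{
	if (unlock) {                   /* read of UNLOCK releases */
		locked = 0;
		return 1;
	}
	if (!locked) {                  /* read of LOCK grants exactly once */
		locked = 1;
		return 1;
	}
	return 0;
}

static int api_lock(void)
{
	int i;

	for (i = 0; i < RETRIES; i++) {
		if (sem_read(0) == 1)   /* acquired */
			return 0;
		usleep(1000);           /* back off and retry */
	}
	return -1;                      /* timeout */
}

static void api_unlock(void)
{
	sem_read(1);
}

int main(void)
{
	if (api_lock() == 0) {
		puts("semaphore held");
		api_unlock();
	}
	return 0;
}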
@@ -86,7 +82,7 @@ netxen_poll_rsp(struct netxen_adapter *adapter)
86 if (++timeout > NX_OS_CRB_RETRY_COUNT) 82 if (++timeout > NX_OS_CRB_RETRY_COUNT)
87 return NX_CDRP_RSP_TIMEOUT; 83 return NX_CDRP_RSP_TIMEOUT;
88 84
89 netxen_nic_read_w1(adapter, NX_CDRP_CRB_OFFSET, &rsp); 85 rsp = NXRD32(adapter, NX_CDRP_CRB_OFFSET);
90 } while (!NX_CDRP_IS_RSP(rsp)); 86 } while (!NX_CDRP_IS_RSP(rsp));
91 87
92 return rsp; 88 return rsp;
@@ -106,16 +102,15 @@ netxen_issue_cmd(struct netxen_adapter *adapter,
106 if (netxen_api_lock(adapter)) 102 if (netxen_api_lock(adapter))
107 return NX_RCODE_TIMEOUT; 103 return NX_RCODE_TIMEOUT;
108 104
109 netxen_nic_write_w1(adapter, NX_SIGN_CRB_OFFSET, signature); 105 NXWR32(adapter, NX_SIGN_CRB_OFFSET, signature);
110 106
111 netxen_nic_write_w1(adapter, NX_ARG1_CRB_OFFSET, arg1); 107 NXWR32(adapter, NX_ARG1_CRB_OFFSET, arg1);
112 108
113 netxen_nic_write_w1(adapter, NX_ARG2_CRB_OFFSET, arg2); 109 NXWR32(adapter, NX_ARG2_CRB_OFFSET, arg2);
114 110
115 netxen_nic_write_w1(adapter, NX_ARG3_CRB_OFFSET, arg3); 111 NXWR32(adapter, NX_ARG3_CRB_OFFSET, arg3);
116 112
117 netxen_nic_write_w1(adapter, NX_CDRP_CRB_OFFSET, 113 NXWR32(adapter, NX_CDRP_CRB_OFFSET, NX_CDRP_FORM_CMD(cmd));
118 NX_CDRP_FORM_CMD(cmd));
119 114
120 rsp = netxen_poll_rsp(adapter); 115 rsp = netxen_poll_rsp(adapter);
121 116
@@ -125,7 +120,7 @@ netxen_issue_cmd(struct netxen_adapter *adapter,
125 120
126 rcode = NX_RCODE_TIMEOUT; 121 rcode = NX_RCODE_TIMEOUT;
127 } else if (rsp == NX_CDRP_RSP_FAIL) { 122 } else if (rsp == NX_CDRP_RSP_FAIL) {
128 netxen_nic_read_w1(adapter, NX_ARG1_CRB_OFFSET, &rcode); 123 rcode = NXRD32(adapter, NX_ARG1_CRB_OFFSET);
129 124
130 printk(KERN_ERR "%s: failed card response code:0x%x\n", 125 printk(KERN_ERR "%s: failed card response code:0x%x\n",
131 netxen_nic_driver_name, rcode); 126 netxen_nic_driver_name, rcode);
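netxen_issue_cmd() is a mailbox handshake over fixed CRB offsets: the signature and three arguments are written first, the command word last, and the same command register is then polled until the firmware overwrites it with a response (the error code is read back from ARG1 on failure). A self-contained model of that handshake, in which the "firmware" is a stub that answers immediately and the offsets and bit layout are illustrative only:

/* Model of the CDRP mailbox handshake used by netxen_issue_cmd(). */
#include <stdio.h>
#include <stdint.h>

enum { SIGN, ARG1, ARG2, ARG3, CDRP, NREGS };

#define CDRP_CMD(c)    (0x80000000U | (c))   /* bit 31 set: host command */
#define CDRP_IS_RSP(v) (((v) & 0x80000000U) == 0)
#define RSP_OK         1
#define RETRY_COUNT    4000

static uint32_t crb[NREGS];                  /* fake CRB registers */

static void fake_fw_poll(void)
{
	/* firmware consumes the command and posts a response code */
	if (!CDRP_IS_RSP(crb[CDRP]))
		crb[CDRP] = RSP_OK;
}

static uint32_t issue_cmd(uint32_t sig, uint32_t a1, uint32_t a2,
			  uint32_t a3, uint32_t cmd)
{
	uint32_t rsp, timeout = 0;

	crb[SIGN] = sig;
	crb[ARG1] = a1;
	crb[ARG2] = a2;
	crb[ARG3] = a3;
	crb[CDRP] = CDRP_CMD(cmd);

	do {
		if (++timeout > RETRY_COUNT)
			return 0;            /* timed out */
		fake_fw_poll();
		rsp = crb[CDRP];
	} while (!CDRP_IS_RSP(rsp));

	return rsp;                          /* RSP_OK, or read ARG1 for rcode */
}

int main(void)
{
	printf("rsp=%u\n", issue_cmd(0xcafe, 1, 2, 3, 7));
	return 0;
}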
@@ -328,6 +323,7 @@ nx_fw_cmd_create_tx_ctx(struct netxen_adapter *adapter)
328 int err = 0; 323 int err = 0;
329 u64 offset, phys_addr; 324 u64 offset, phys_addr;
330 dma_addr_t rq_phys_addr, rsp_phys_addr; 325 dma_addr_t rq_phys_addr, rsp_phys_addr;
326 struct nx_host_tx_ring *tx_ring = &adapter->tx_ring;
331 327
332 rq_size = SIZEOF_HOSTRQ_TX(nx_hostrq_tx_ctx_t); 328 rq_size = SIZEOF_HOSTRQ_TX(nx_hostrq_tx_ctx_t);
333 rq_addr = pci_alloc_consistent(adapter->pdev, 329 rq_addr = pci_alloc_consistent(adapter->pdev,
@@ -367,10 +363,8 @@ nx_fw_cmd_create_tx_ctx(struct netxen_adapter *adapter)
367 363
368 prq_cds = &prq->cds_ring; 364 prq_cds = &prq->cds_ring;
369 365
370 prq_cds->host_phys_addr = 366 prq_cds->host_phys_addr = cpu_to_le64(tx_ring->phys_addr);
371 cpu_to_le64(adapter->ahw.cmd_desc_phys_addr); 367 prq_cds->ring_size = cpu_to_le32(tx_ring->num_desc);
372
373 prq_cds->ring_size = cpu_to_le32(adapter->num_txd);
374 368
375 phys_addr = rq_phys_addr; 369 phys_addr = rq_phys_addr;
376 err = netxen_issue_cmd(adapter, 370 err = netxen_issue_cmd(adapter,
@@ -383,8 +377,7 @@ nx_fw_cmd_create_tx_ctx(struct netxen_adapter *adapter)
383 377
384 if (err == NX_RCODE_SUCCESS) { 378 if (err == NX_RCODE_SUCCESS) {
385 temp = le32_to_cpu(prsp->cds_ring.host_producer_crb); 379 temp = le32_to_cpu(prsp->cds_ring.host_producer_crb);
386 adapter->crb_addr_cmd_producer = 380 tx_ring->crb_cmd_producer = NETXEN_NIC_REG(temp - 0x200);
387 NETXEN_NIC_REG(temp - 0x200);
388#if 0 381#if 0
389 adapter->tx_state = 382 adapter->tx_state =
390 le32_to_cpu(prsp->host_ctx_state); 383 le32_to_cpu(prsp->host_ctx_state);
@@ -448,7 +441,19 @@ static struct netxen_recv_crb recv_crb_registers[] = {
448 NETXEN_NIC_REG(0x120) 441 NETXEN_NIC_REG(0x120)
449 }, 442 },
450 /* crb_sts_consumer: */ 443 /* crb_sts_consumer: */
451 NETXEN_NIC_REG(0x138), 444 {
445 NETXEN_NIC_REG(0x138),
446 NETXEN_NIC_REG_2(0x000),
447 NETXEN_NIC_REG_2(0x004),
448 NETXEN_NIC_REG_2(0x008),
449 },
450 /* sw_int_mask */
451 {
452 CRB_SW_INT_MASK_0,
453 NETXEN_NIC_REG_2(0x044),
454 NETXEN_NIC_REG_2(0x048),
455 NETXEN_NIC_REG_2(0x04c),
456 },
452 }, 457 },
453 /* Instance 1 */ 458 /* Instance 1 */
454 { 459 {
@@ -461,7 +466,19 @@ static struct netxen_recv_crb recv_crb_registers[] = {
461 NETXEN_NIC_REG(0x164) 466 NETXEN_NIC_REG(0x164)
462 }, 467 },
463 /* crb_sts_consumer: */ 468 /* crb_sts_consumer: */
464 NETXEN_NIC_REG(0x17c), 469 {
470 NETXEN_NIC_REG(0x17c),
471 NETXEN_NIC_REG_2(0x020),
472 NETXEN_NIC_REG_2(0x024),
473 NETXEN_NIC_REG_2(0x028),
474 },
475 /* sw_int_mask */
476 {
477 CRB_SW_INT_MASK_1,
478 NETXEN_NIC_REG_2(0x064),
479 NETXEN_NIC_REG_2(0x068),
480 NETXEN_NIC_REG_2(0x06c),
481 },
465 }, 482 },
466 /* Instance 2 */ 483 /* Instance 2 */
467 { 484 {
@@ -474,7 +491,19 @@ static struct netxen_recv_crb recv_crb_registers[] = {
474 NETXEN_NIC_REG(0x208) 491 NETXEN_NIC_REG(0x208)
475 }, 492 },
476 /* crb_sts_consumer: */ 493 /* crb_sts_consumer: */
477 NETXEN_NIC_REG(0x220), 494 {
495 NETXEN_NIC_REG(0x220),
496 NETXEN_NIC_REG_2(0x03c),
497 NETXEN_NIC_REG_2(0x03c),
498 NETXEN_NIC_REG_2(0x03c),
499 },
500 /* sw_int_mask */
501 {
502 CRB_SW_INT_MASK_2,
503 NETXEN_NIC_REG_2(0x03c),
504 NETXEN_NIC_REG_2(0x03c),
505 NETXEN_NIC_REG_2(0x03c),
506 },
478 }, 507 },
479 /* Instance 3 */ 508 /* Instance 3 */
480 { 509 {
@@ -487,7 +516,19 @@ static struct netxen_recv_crb recv_crb_registers[] = {
487 NETXEN_NIC_REG(0x24c) 516 NETXEN_NIC_REG(0x24c)
488 }, 517 },
489 /* crb_sts_consumer: */ 518 /* crb_sts_consumer: */
490 NETXEN_NIC_REG(0x264), 519 {
520 NETXEN_NIC_REG(0x264),
521 NETXEN_NIC_REG_2(0x03c),
522 NETXEN_NIC_REG_2(0x03c),
523 NETXEN_NIC_REG_2(0x03c),
524 },
525 /* sw_int_mask */
526 {
527 CRB_SW_INT_MASK_3,
528 NETXEN_NIC_REG_2(0x03c),
529 NETXEN_NIC_REG_2(0x03c),
530 NETXEN_NIC_REG_2(0x03c),
531 },
491 }, 532 },
492}; 533};
493 534
@@ -497,62 +538,65 @@ netxen_init_old_ctx(struct netxen_adapter *adapter)
497 struct netxen_recv_context *recv_ctx; 538 struct netxen_recv_context *recv_ctx;
498 struct nx_host_rds_ring *rds_ring; 539 struct nx_host_rds_ring *rds_ring;
499 struct nx_host_sds_ring *sds_ring; 540 struct nx_host_sds_ring *sds_ring;
541 struct nx_host_tx_ring *tx_ring;
500 int ring; 542 int ring;
501 int func_id = adapter->portnum; 543 int port = adapter->portnum;
544 struct netxen_ring_ctx *hwctx = adapter->ctx_desc;
545 u32 signature;
502 546
503 adapter->ctx_desc->cmd_ring_addr = 547 tx_ring = &adapter->tx_ring;
504 cpu_to_le64(adapter->ahw.cmd_desc_phys_addr); 548 hwctx->cmd_ring_addr = cpu_to_le64(tx_ring->phys_addr);
505 adapter->ctx_desc->cmd_ring_size = 549 hwctx->cmd_ring_size = cpu_to_le32(tx_ring->num_desc);
506 cpu_to_le32(adapter->num_txd);
507 550
508 recv_ctx = &adapter->recv_ctx; 551 recv_ctx = &adapter->recv_ctx;
509 552
510 for (ring = 0; ring < adapter->max_rds_rings; ring++) { 553 for (ring = 0; ring < adapter->max_rds_rings; ring++) {
511 rds_ring = &recv_ctx->rds_rings[ring]; 554 rds_ring = &recv_ctx->rds_rings[ring];
512 555
513 adapter->ctx_desc->rcv_ctx[ring].rcv_ring_addr = 556 hwctx->rcv_rings[ring].addr =
514 cpu_to_le64(rds_ring->phys_addr); 557 cpu_to_le64(rds_ring->phys_addr);
515 adapter->ctx_desc->rcv_ctx[ring].rcv_ring_size = 558 hwctx->rcv_rings[ring].size =
516 cpu_to_le32(rds_ring->num_desc); 559 cpu_to_le32(rds_ring->num_desc);
517 } 560 }
518 sds_ring = &recv_ctx->sds_rings[0];
519 adapter->ctx_desc->sts_ring_addr = cpu_to_le64(sds_ring->phys_addr);
520 adapter->ctx_desc->sts_ring_size = cpu_to_le32(sds_ring->num_desc);
521 561
522 adapter->pci_write_normalize(adapter, CRB_CTX_ADDR_REG_LO(func_id), 562 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
563 sds_ring = &recv_ctx->sds_rings[ring];
564
565 if (ring == 0) {
566 hwctx->sts_ring_addr = cpu_to_le64(sds_ring->phys_addr);
567 hwctx->sts_ring_size = cpu_to_le32(sds_ring->num_desc);
568 }
569 hwctx->sts_rings[ring].addr = cpu_to_le64(sds_ring->phys_addr);
570 hwctx->sts_rings[ring].size = cpu_to_le32(sds_ring->num_desc);
571 hwctx->sts_rings[ring].msi_index = cpu_to_le16(ring);
572 }
573 hwctx->sts_ring_count = cpu_to_le32(adapter->max_sds_rings);
574
575 signature = (adapter->max_sds_rings > 1) ?
576 NETXEN_CTX_SIGNATURE_V2 : NETXEN_CTX_SIGNATURE;
577
578 NXWR32(adapter, CRB_CTX_ADDR_REG_LO(port),
523 lower32(adapter->ctx_desc_phys_addr)); 579 lower32(adapter->ctx_desc_phys_addr));
524 adapter->pci_write_normalize(adapter, CRB_CTX_ADDR_REG_HI(func_id), 580 NXWR32(adapter, CRB_CTX_ADDR_REG_HI(port),
525 upper32(adapter->ctx_desc_phys_addr)); 581 upper32(adapter->ctx_desc_phys_addr));
526 adapter->pci_write_normalize(adapter, CRB_CTX_SIGNATURE_REG(func_id), 582 NXWR32(adapter, CRB_CTX_SIGNATURE_REG(port),
527 NETXEN_CTX_SIGNATURE | func_id); 583 signature | port);
528 return 0; 584 return 0;
529} 585}
530 586
531static uint32_t sw_int_mask[4] = {
532 CRB_SW_INT_MASK_0, CRB_SW_INT_MASK_1,
533 CRB_SW_INT_MASK_2, CRB_SW_INT_MASK_3
534};
535
536int netxen_alloc_hw_resources(struct netxen_adapter *adapter) 587int netxen_alloc_hw_resources(struct netxen_adapter *adapter)
537{ 588{
538 struct netxen_hardware_context *hw = &adapter->ahw;
539 u32 state = 0;
540 void *addr; 589 void *addr;
541 int err = 0; 590 int err = 0;
542 int ring; 591 int ring;
543 struct netxen_recv_context *recv_ctx; 592 struct netxen_recv_context *recv_ctx;
544 struct nx_host_rds_ring *rds_ring; 593 struct nx_host_rds_ring *rds_ring;
545 struct nx_host_sds_ring *sds_ring; 594 struct nx_host_sds_ring *sds_ring;
595 struct nx_host_tx_ring *tx_ring = &adapter->tx_ring;
546 596
547 struct pci_dev *pdev = adapter->pdev; 597 struct pci_dev *pdev = adapter->pdev;
548 struct net_device *netdev = adapter->netdev; 598 struct net_device *netdev = adapter->netdev;
549 599 int port = adapter->portnum;
550 err = netxen_receive_peg_ready(adapter);
551 if (err) {
552 printk(KERN_ERR "Rcv Peg initialization not complete:%x.\n",
553 state);
554 return err;
555 }
556 600
557 addr = pci_alloc_consistent(pdev, 601 addr = pci_alloc_consistent(pdev,
558 sizeof(struct netxen_ring_ctx) + sizeof(uint32_t), 602 sizeof(struct netxen_ring_ctx) + sizeof(uint32_t),
@@ -564,17 +608,16 @@ int netxen_alloc_hw_resources(struct netxen_adapter *adapter)
564 } 608 }
565 memset(addr, 0, sizeof(struct netxen_ring_ctx)); 609 memset(addr, 0, sizeof(struct netxen_ring_ctx));
566 adapter->ctx_desc = (struct netxen_ring_ctx *)addr; 610 adapter->ctx_desc = (struct netxen_ring_ctx *)addr;
567 adapter->ctx_desc->ctx_id = cpu_to_le32(adapter->portnum); 611 adapter->ctx_desc->ctx_id = cpu_to_le32(port);
568 adapter->ctx_desc->cmd_consumer_offset = 612 adapter->ctx_desc->cmd_consumer_offset =
569 cpu_to_le64(adapter->ctx_desc_phys_addr + 613 cpu_to_le64(adapter->ctx_desc_phys_addr +
570 sizeof(struct netxen_ring_ctx)); 614 sizeof(struct netxen_ring_ctx));
571 adapter->cmd_consumer = 615 tx_ring->hw_consumer =
572 (__le32 *)(((char *)addr) + sizeof(struct netxen_ring_ctx)); 616 (__le32 *)(((char *)addr) + sizeof(struct netxen_ring_ctx));
573 617
574 /* cmd desc ring */ 618 /* cmd desc ring */
575 addr = pci_alloc_consistent(pdev, 619 addr = pci_alloc_consistent(pdev, TX_DESC_RINGSIZE(tx_ring),
576 TX_DESC_RINGSIZE(adapter), 620 &tx_ring->phys_addr);
577 &hw->cmd_desc_phys_addr);
578 621
579 if (addr == NULL) { 622 if (addr == NULL) {
580 dev_err(&pdev->dev, "%s: failed to allocate tx desc ring\n", 623 dev_err(&pdev->dev, "%s: failed to allocate tx desc ring\n",
@@ -582,7 +625,7 @@ int netxen_alloc_hw_resources(struct netxen_adapter *adapter)
582 return -ENOMEM; 625 return -ENOMEM;
583 } 626 }
584 627
585 hw->cmd_desc_head = (struct cmd_desc_type0 *)addr; 628 tx_ring->desc_head = (struct cmd_desc_type0 *)addr;
586 629
587 recv_ctx = &adapter->recv_ctx; 630 recv_ctx = &adapter->recv_ctx;
588 631
@@ -602,8 +645,7 @@ int netxen_alloc_hw_resources(struct netxen_adapter *adapter)
602 645
603 if (adapter->fw_major < 4) 646 if (adapter->fw_major < 4)
604 rds_ring->crb_rcv_producer = 647 rds_ring->crb_rcv_producer =
605 recv_crb_registers[adapter->portnum]. 648 recv_crb_registers[port].crb_rcv_producer[ring];
606 crb_rcv_producer[ring];
607 } 649 }
608 650
609 for (ring = 0; ring < adapter->max_sds_rings; ring++) { 651 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
@@ -620,13 +662,16 @@ int netxen_alloc_hw_resources(struct netxen_adapter *adapter)
620 goto err_out_free; 662 goto err_out_free;
621 } 663 }
622 sds_ring->desc_head = (struct status_desc *)addr; 664 sds_ring->desc_head = (struct status_desc *)addr;
665
666 sds_ring->crb_sts_consumer =
667 recv_crb_registers[port].crb_sts_consumer[ring];
668
669 sds_ring->crb_intr_mask =
670 recv_crb_registers[port].sw_int_mask[ring];
623 } 671 }
624 672
625 673
626 if (adapter->fw_major >= 4) { 674 if (adapter->fw_major >= 4) {
627 adapter->intr_scheme = INTR_SCHEME_PERPORT;
628 adapter->msi_mode = MSI_MODE_MULTIFUNC;
629
630 err = nx_fw_cmd_create_rx_ctx(adapter); 675 err = nx_fw_cmd_create_rx_ctx(adapter);
631 if (err) 676 if (err)
632 goto err_out_free; 677 goto err_out_free;
@@ -634,23 +679,11 @@ int netxen_alloc_hw_resources(struct netxen_adapter *adapter)
634 if (err) 679 if (err)
635 goto err_out_free; 680 goto err_out_free;
636 } else { 681 } else {
637 sds_ring = &recv_ctx->sds_rings[0];
638 sds_ring->crb_sts_consumer =
639 recv_crb_registers[adapter->portnum].crb_sts_consumer;
640
641 adapter->intr_scheme = adapter->pci_read_normalize(adapter,
642 CRB_NIC_CAPABILITIES_FW);
643 adapter->msi_mode = adapter->pci_read_normalize(adapter,
644 CRB_NIC_MSI_MODE_FW);
645 recv_ctx->sds_rings[0].crb_intr_mask =
646 sw_int_mask[adapter->portnum];
647
648 err = netxen_init_old_ctx(adapter); 682 err = netxen_init_old_ctx(adapter);
649 if (err) { 683 if (err) {
650 netxen_free_hw_resources(adapter); 684 netxen_free_hw_resources(adapter);
651 return err; 685 return err;
652 } 686 }
653
654 } 687 }
655 688
656 return 0; 689 return 0;
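For firmware older than 4.x, the context descriptor built in netxen_init_old_ctx() is handed to the card by writing its DMA address as two 32-bit halves followed by a signature word that carries the port number. A sketch of that handoff arithmetic; the register names and signature values below are placeholders rather than the driver's:

/* Sketch of the legacy context handoff: split a 64-bit DMA address
 * into two CRB writes, then post signature | port. */
#include <stdio.h>
#include <stdint.h>

#define lower32(x) ((uint32_t)((x) & 0xffffffffULL))
#define upper32(x) ((uint32_t)(((x) >> 32) & 0xffffffffULL))

#define CTX_SIGNATURE    0xdee0U      /* hypothetical value */
#define CTX_SIGNATURE_V2 0x0002dee0U  /* hypothetical: multiple status rings */

static void crb_write(const char *reg, uint32_t val)
{
	printf("%-22s <= 0x%08x\n", reg, val);
}

int main(void)
{
	uint64_t ctx_phys = 0x00000001f2345000ULL;   /* fake DMA address */
	int port = 2, max_sds_rings = 4;
	uint32_t signature = (max_sds_rings > 1) ?
			CTX_SIGNATURE_V2 : CTX_SIGNATURE;

	crb_write("CRB_CTX_ADDR_REG_LO", lower32(ctx_phys));
	crb_write("CRB_CTX_ADDR_REG_HI", upper32(ctx_phys));
	crb_write("CRB_CTX_SIGNATURE_REG", signature | port);
	return 0;
}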
@@ -665,11 +698,19 @@ void netxen_free_hw_resources(struct netxen_adapter *adapter)
665 struct netxen_recv_context *recv_ctx; 698 struct netxen_recv_context *recv_ctx;
666 struct nx_host_rds_ring *rds_ring; 699 struct nx_host_rds_ring *rds_ring;
667 struct nx_host_sds_ring *sds_ring; 700 struct nx_host_sds_ring *sds_ring;
701 struct nx_host_tx_ring *tx_ring;
668 int ring; 702 int ring;
669 703
704 int port = adapter->portnum;
705
670 if (adapter->fw_major >= 4) { 706 if (adapter->fw_major >= 4) {
671 nx_fw_cmd_destroy_tx_ctx(adapter); 707 nx_fw_cmd_destroy_tx_ctx(adapter);
672 nx_fw_cmd_destroy_rx_ctx(adapter); 708 nx_fw_cmd_destroy_rx_ctx(adapter);
709 } else {
710 netxen_api_lock(adapter);
711 NXWR32(adapter, CRB_CTX_SIGNATURE_REG(port),
712 NETXEN_CTX_RESET | port);
713 netxen_api_unlock(adapter);
673 } 714 }
674 715
675 if (adapter->ctx_desc != NULL) { 716 if (adapter->ctx_desc != NULL) {
@@ -681,13 +722,12 @@ void netxen_free_hw_resources(struct netxen_adapter *adapter)
681 adapter->ctx_desc = NULL; 722 adapter->ctx_desc = NULL;
682 } 723 }
683 724
684 if (adapter->ahw.cmd_desc_head != NULL) { 725 tx_ring = &adapter->tx_ring;
726 if (tx_ring->desc_head != NULL) {
685 pci_free_consistent(adapter->pdev, 727 pci_free_consistent(adapter->pdev,
686 sizeof(struct cmd_desc_type0) * 728 TX_DESC_RINGSIZE(tx_ring),
687 adapter->num_txd, 729 tx_ring->desc_head, tx_ring->phys_addr);
688 adapter->ahw.cmd_desc_head, 730 tx_ring->desc_head = NULL;
689 adapter->ahw.cmd_desc_phys_addr);
690 adapter->ahw.cmd_desc_head = NULL;
691 } 731 }
692 732
693 recv_ctx = &adapter->recv_ctx; 733 recv_ctx = &adapter->recv_ctx;
diff --git a/drivers/net/netxen/netxen_nic_ethtool.c b/drivers/net/netxen/netxen_nic_ethtool.c
index a677ff895184..a452b2facb77 100644
--- a/drivers/net/netxen/netxen_nic_ethtool.c
+++ b/drivers/net/netxen/netxen_nic_ethtool.c
@@ -30,7 +30,6 @@
30 30
31#include <linux/types.h> 31#include <linux/types.h>
32#include <linux/delay.h> 32#include <linux/delay.h>
33#include <asm/uaccess.h>
34#include <linux/pci.h> 33#include <linux/pci.h>
35#include <asm/io.h> 34#include <asm/io.h>
36#include <linux/netdevice.h> 35#include <linux/netdevice.h>
@@ -53,13 +52,9 @@ struct netxen_nic_stats {
53#define NETXEN_NIC_INVALID_DATA 0xDEADBEEF 52#define NETXEN_NIC_INVALID_DATA 0xDEADBEEF
54 53
55static const struct netxen_nic_stats netxen_nic_gstrings_stats[] = { 54static const struct netxen_nic_stats netxen_nic_gstrings_stats[] = {
56 {"rcvd_bad_skb", NETXEN_NIC_STAT(stats.rcvdbadskb)},
57 {"xmit_called", NETXEN_NIC_STAT(stats.xmitcalled)}, 55 {"xmit_called", NETXEN_NIC_STAT(stats.xmitcalled)},
58 {"xmited_frames", NETXEN_NIC_STAT(stats.xmitedframes)},
59 {"xmit_finished", NETXEN_NIC_STAT(stats.xmitfinished)}, 56 {"xmit_finished", NETXEN_NIC_STAT(stats.xmitfinished)},
60 {"bad_skb_len", NETXEN_NIC_STAT(stats.badskblen)}, 57 {"rx_dropped", NETXEN_NIC_STAT(stats.rxdropped)},
61 {"no_cmd_desc", NETXEN_NIC_STAT(stats.nocmddescriptor)},
62 {"polled", NETXEN_NIC_STAT(stats.polled)},
63 {"tx_dropped", NETXEN_NIC_STAT(stats.txdropped)}, 58 {"tx_dropped", NETXEN_NIC_STAT(stats.txdropped)},
64 {"csummed", NETXEN_NIC_STAT(stats.csummed)}, 59 {"csummed", NETXEN_NIC_STAT(stats.csummed)},
65 {"no_rcv", NETXEN_NIC_STAT(stats.no_rcv)}, 60 {"no_rcv", NETXEN_NIC_STAT(stats.no_rcv)},
@@ -97,12 +92,9 @@ netxen_nic_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo)
97 strncpy(drvinfo->driver, netxen_nic_driver_name, 32); 92 strncpy(drvinfo->driver, netxen_nic_driver_name, 32);
98 strncpy(drvinfo->version, NETXEN_NIC_LINUX_VERSIONID, 32); 93 strncpy(drvinfo->version, NETXEN_NIC_LINUX_VERSIONID, 32);
99 write_lock_irqsave(&adapter->adapter_lock, flags); 94 write_lock_irqsave(&adapter->adapter_lock, flags);
100 fw_major = adapter->pci_read_normalize(adapter, 95 fw_major = NXRD32(adapter, NETXEN_FW_VERSION_MAJOR);
101 NETXEN_FW_VERSION_MAJOR); 96 fw_minor = NXRD32(adapter, NETXEN_FW_VERSION_MINOR);
102 fw_minor = adapter->pci_read_normalize(adapter, 97 fw_build = NXRD32(adapter, NETXEN_FW_VERSION_SUB);
103 NETXEN_FW_VERSION_MINOR);
104 fw_build = adapter->pci_read_normalize(adapter,
105 NETXEN_FW_VERSION_SUB);
106 write_unlock_irqrestore(&adapter->adapter_lock, flags); 98 write_unlock_irqrestore(&adapter->adapter_lock, flags);
107 sprintf(drvinfo->fw_version, "%d.%d.%d", fw_major, fw_minor, fw_build); 99 sprintf(drvinfo->fw_version, "%d.%d.%d", fw_major, fw_minor, fw_build);
108 100
@@ -115,6 +107,7 @@ static int
115netxen_nic_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd) 107netxen_nic_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
116{ 108{
117 struct netxen_adapter *adapter = netdev_priv(dev); 109 struct netxen_adapter *adapter = netdev_priv(dev);
110 int check_sfp_module = 0;
118 111
119 /* read which mode */ 112 /* read which mode */
120 if (adapter->ahw.port_type == NETXEN_NIC_GBE) { 113 if (adapter->ahw.port_type == NETXEN_NIC_GBE) {
@@ -139,7 +132,7 @@ netxen_nic_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
139 } else if (adapter->ahw.port_type == NETXEN_NIC_XGBE) { 132 } else if (adapter->ahw.port_type == NETXEN_NIC_XGBE) {
140 u32 val; 133 u32 val;
141 134
142 adapter->hw_read_wx(adapter, NETXEN_PORT_MODE_ADDR, &val, 4); 135 val = NXRD32(adapter, NETXEN_PORT_MODE_ADDR);
143 if (val == NETXEN_PORT_MODE_802_3_AP) { 136 if (val == NETXEN_PORT_MODE_802_3_AP) {
144 ecmd->supported = SUPPORTED_1000baseT_Full; 137 ecmd->supported = SUPPORTED_1000baseT_Full;
145 ecmd->advertising = ADVERTISED_1000baseT_Full; 138 ecmd->advertising = ADVERTISED_1000baseT_Full;
@@ -148,13 +141,19 @@ netxen_nic_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
148 ecmd->advertising = ADVERTISED_10000baseT_Full; 141 ecmd->advertising = ADVERTISED_10000baseT_Full;
149 } 142 }
150 143
144 if (netif_running(dev) && adapter->has_link_events) {
145 ecmd->speed = adapter->link_speed;
146 ecmd->autoneg = adapter->link_autoneg;
147 ecmd->duplex = adapter->link_duplex;
148 goto skip;
149 }
150
151 ecmd->port = PORT_TP; 151 ecmd->port = PORT_TP;
152 152
153 if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) { 153 if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) {
154 u16 pcifn = adapter->ahw.pci_func; 154 u16 pcifn = adapter->ahw.pci_func;
155 155
156 adapter->hw_read_wx(adapter, 156 val = NXRD32(adapter, P3_LINK_SPEED_REG(pcifn));
157 P3_LINK_SPEED_REG(pcifn), &val, 4);
158 ecmd->speed = P3_LINK_SPEED_MHZ * 157 ecmd->speed = P3_LINK_SPEED_MHZ *
159 P3_LINK_SPEED_VAL(pcifn, val); 158 P3_LINK_SPEED_VAL(pcifn, val);
160 } else 159 } else
@@ -165,10 +164,11 @@ netxen_nic_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
165 } else 164 } else
166 return -EIO; 165 return -EIO;
167 166
167skip:
168 ecmd->phy_address = adapter->physical_port; 168 ecmd->phy_address = adapter->physical_port;
169 ecmd->transceiver = XCVR_EXTERNAL; 169 ecmd->transceiver = XCVR_EXTERNAL;
170 170
171 switch ((netxen_brdtype_t)adapter->ahw.board_type) { 171 switch (adapter->ahw.board_type) {
172 case NETXEN_BRDTYPE_P2_SB35_4G: 172 case NETXEN_BRDTYPE_P2_SB35_4G:
173 case NETXEN_BRDTYPE_P2_SB31_2G: 173 case NETXEN_BRDTYPE_P2_SB31_2G:
174 case NETXEN_BRDTYPE_P3_REF_QG: 174 case NETXEN_BRDTYPE_P3_REF_QG:
@@ -195,7 +195,7 @@ netxen_nic_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
195 case NETXEN_BRDTYPE_P3_HMEZ: 195 case NETXEN_BRDTYPE_P3_HMEZ:
196 ecmd->supported |= SUPPORTED_MII; 196 ecmd->supported |= SUPPORTED_MII;
197 ecmd->advertising |= ADVERTISED_MII; 197 ecmd->advertising |= ADVERTISED_MII;
198 ecmd->port = PORT_FIBRE; 198 ecmd->port = PORT_MII;
199 ecmd->autoneg = AUTONEG_DISABLE; 199 ecmd->autoneg = AUTONEG_DISABLE;
200 break; 200 break;
201 case NETXEN_BRDTYPE_P3_10G_SFP_PLUS: 201 case NETXEN_BRDTYPE_P3_10G_SFP_PLUS:
@@ -203,6 +203,8 @@ netxen_nic_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
203 case NETXEN_BRDTYPE_P3_10G_SFP_QT: 203 case NETXEN_BRDTYPE_P3_10G_SFP_QT:
204 ecmd->advertising |= ADVERTISED_TP; 204 ecmd->advertising |= ADVERTISED_TP;
205 ecmd->supported |= SUPPORTED_TP; 205 ecmd->supported |= SUPPORTED_TP;
206 check_sfp_module = netif_running(dev) &&
207 adapter->has_link_events;
206 case NETXEN_BRDTYPE_P2_SB31_10G: 208 case NETXEN_BRDTYPE_P2_SB31_10G:
207 case NETXEN_BRDTYPE_P3_10G_XFP: 209 case NETXEN_BRDTYPE_P3_10G_XFP:
208 ecmd->supported |= SUPPORTED_FIBRE; 210 ecmd->supported |= SUPPORTED_FIBRE;
@@ -217,6 +219,8 @@ netxen_nic_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
217 ecmd->advertising |= 219 ecmd->advertising |=
218 (ADVERTISED_FIBRE | ADVERTISED_TP); 220 (ADVERTISED_FIBRE | ADVERTISED_TP);
219 ecmd->port = PORT_FIBRE; 221 ecmd->port = PORT_FIBRE;
222 check_sfp_module = netif_running(dev) &&
223 adapter->has_link_events;
220 } else { 224 } else {
221 ecmd->autoneg = AUTONEG_ENABLE; 225 ecmd->autoneg = AUTONEG_ENABLE;
222 ecmd->supported |= (SUPPORTED_TP |SUPPORTED_Autoneg); 226 ecmd->supported |= (SUPPORTED_TP |SUPPORTED_Autoneg);
@@ -227,10 +231,27 @@ netxen_nic_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
227 break; 231 break;
228 default: 232 default:
229 printk(KERN_ERR "netxen-nic: Unsupported board model %d\n", 233 printk(KERN_ERR "netxen-nic: Unsupported board model %d\n",
230 (netxen_brdtype_t)adapter->ahw.board_type); 234 adapter->ahw.board_type);
231 return -EIO; 235 return -EIO;
232 } 236 }
233 237
238 if (check_sfp_module) {
239 switch (adapter->module_type) {
240 case LINKEVENT_MODULE_OPTICAL_UNKNOWN:
241 case LINKEVENT_MODULE_OPTICAL_SRLR:
242 case LINKEVENT_MODULE_OPTICAL_LRM:
243 case LINKEVENT_MODULE_OPTICAL_SFP_1G:
244 ecmd->port = PORT_FIBRE;
245 break;
246 case LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLE:
247 case LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLELEN:
248 case LINKEVENT_MODULE_TWINAX:
249 ecmd->port = PORT_TP;
250 default:
251 ecmd->port = -1;
252 }
253 }
254
234 return 0; 255 return 0;
235} 256}
236 257
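When firmware link events are enabled, get_settings() reports the ethtool port type from the SFP+ module the firmware saw rather than from the board type alone. A standalone version of that classification; the enum values and PORT_* numbers are local placeholders:

/* Map a reported SFP+ module type to an ethtool-style port value. */
#include <stdio.h>

enum module_type {
	MODULE_OPTICAL_UNKNOWN,
	MODULE_OPTICAL_SRLR,
	MODULE_OPTICAL_LRM,
	MODULE_OPTICAL_SFP_1G,
	MODULE_TWINAX,
	MODULE_TWINAX_UNSUPPORTED,
};

#define PORT_TP     0
#define PORT_FIBRE  3

static int module_to_port(enum module_type mod)
{
	switch (mod) {
	case MODULE_OPTICAL_UNKNOWN:
	case MODULE_OPTICAL_SRLR:
	case MODULE_OPTICAL_LRM:
	case MODULE_OPTICAL_SFP_1G:
		return PORT_FIBRE;              /* optical modules */
	case MODULE_TWINAX:
	case MODULE_TWINAX_UNSUPPORTED:
		return PORT_TP;                 /* direct-attach copper */
	default:
		return -1;                      /* nothing recognizable */
	}
}

int main(void)
{
	printf("twinax -> port %d\n", module_to_port(MODULE_TWINAX));
	return 0;
}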
@@ -398,12 +419,11 @@ netxen_nic_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *p)
398 regs->version = (1 << 24) | (adapter->ahw.revision_id << 16) | 419 regs->version = (1 << 24) | (adapter->ahw.revision_id << 16) |
399 (adapter->pdev)->device; 420 (adapter->pdev)->device;
400 /* which mode */ 421 /* which mode */
401 adapter->hw_read_wx(adapter, NETXEN_NIU_MODE, &regs_buff[0], 4); 422 regs_buff[0] = NXRD32(adapter, NETXEN_NIU_MODE);
402 mode = regs_buff[0]; 423 mode = regs_buff[0];
403 424
404 /* Common registers to all the modes */ 425 /* Common registers to all the modes */
405 adapter->hw_read_wx(adapter, 426 regs_buff[2] = NXRD32(adapter, NETXEN_NIU_STRAP_VALUE_SAVE_HIGHER);
406 NETXEN_NIU_STRAP_VALUE_SAVE_HIGHER, &regs_buff[2], 4);
407 /* GB/XGB Mode */ 427 /* GB/XGB Mode */
408 mode = (mode / 2) - 1; 428 mode = (mode / 2) - 1;
409 window = 0; 429 window = 0;
@@ -414,9 +434,8 @@ netxen_nic_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *p)
414 window = adapter->physical_port * 434 window = adapter->physical_port *
415 NETXEN_NIC_PORT_WINDOW; 435 NETXEN_NIC_PORT_WINDOW;
416 436
417 adapter->hw_read_wx(adapter, 437 regs_buff[i] = NXRD32(adapter,
418 niu_registers[mode].reg[i - 3] + window, 438 niu_registers[mode].reg[i - 3] + window);
419 &regs_buff[i], 4);
420 } 439 }
421 440
422 } 441 }
@@ -440,7 +459,7 @@ static u32 netxen_nic_test_link(struct net_device *dev)
440 return !val; 459 return !val;
441 } 460 }
442 } else if (adapter->ahw.port_type == NETXEN_NIC_XGBE) { 461 } else if (adapter->ahw.port_type == NETXEN_NIC_XGBE) {
443 val = adapter->pci_read_normalize(adapter, CRB_XG_STATE); 462 val = NXRD32(adapter, CRB_XG_STATE);
444 return (val == XG_LINK_UP) ? 0 : 1; 463 return (val == XG_LINK_UP) ? 0 : 1;
445 } 464 }
446 return -EIO; 465 return -EIO;
@@ -504,10 +523,9 @@ netxen_nic_get_pauseparam(struct net_device *dev,
504 if ((port < 0) || (port > NETXEN_NIU_MAX_GBE_PORTS)) 523 if ((port < 0) || (port > NETXEN_NIU_MAX_GBE_PORTS))
505 return; 524 return;
506 /* get flow control settings */ 525 /* get flow control settings */
507 netxen_nic_read_w0(adapter,NETXEN_NIU_GB_MAC_CONFIG_0(port), 526 val = NXRD32(adapter, NETXEN_NIU_GB_MAC_CONFIG_0(port));
508 &val);
509 pause->rx_pause = netxen_gb_get_rx_flowctl(val); 527 pause->rx_pause = netxen_gb_get_rx_flowctl(val);
510 netxen_nic_read_w0(adapter, NETXEN_NIU_GB_PAUSE_CTL, &val); 528 val = NXRD32(adapter, NETXEN_NIU_GB_PAUSE_CTL);
511 switch (port) { 529 switch (port) {
512 case 0: 530 case 0:
513 pause->tx_pause = !(netxen_gb_get_gb0_mask(val)); 531 pause->tx_pause = !(netxen_gb_get_gb0_mask(val));
@@ -527,7 +545,7 @@ netxen_nic_get_pauseparam(struct net_device *dev,
527 if ((port < 0) || (port > NETXEN_NIU_MAX_XG_PORTS)) 545 if ((port < 0) || (port > NETXEN_NIU_MAX_XG_PORTS))
528 return; 546 return;
529 pause->rx_pause = 1; 547 pause->rx_pause = 1;
530 netxen_nic_read_w0(adapter, NETXEN_NIU_XG_PAUSE_CTL, &val); 548 val = NXRD32(adapter, NETXEN_NIU_XG_PAUSE_CTL);
531 if (port == 0) 549 if (port == 0)
532 pause->tx_pause = !(netxen_xg_get_xg0_mask(val)); 550 pause->tx_pause = !(netxen_xg_get_xg0_mask(val));
533 else 551 else
@@ -550,18 +568,17 @@ netxen_nic_set_pauseparam(struct net_device *dev,
550 if ((port < 0) || (port > NETXEN_NIU_MAX_GBE_PORTS)) 568 if ((port < 0) || (port > NETXEN_NIU_MAX_GBE_PORTS))
551 return -EIO; 569 return -EIO;
552 /* set flow control */ 570 /* set flow control */
553 netxen_nic_read_w0(adapter, 571 val = NXRD32(adapter, NETXEN_NIU_GB_MAC_CONFIG_0(port));
554 NETXEN_NIU_GB_MAC_CONFIG_0(port), &val);
555 572
556 if (pause->rx_pause) 573 if (pause->rx_pause)
557 netxen_gb_rx_flowctl(val); 574 netxen_gb_rx_flowctl(val);
558 else 575 else
559 netxen_gb_unset_rx_flowctl(val); 576 netxen_gb_unset_rx_flowctl(val);
560 577
561 netxen_nic_write_w0(adapter, NETXEN_NIU_GB_MAC_CONFIG_0(port), 578 NXWR32(adapter, NETXEN_NIU_GB_MAC_CONFIG_0(port),
562 val); 579 val);
563 /* set autoneg */ 580 /* set autoneg */
564 netxen_nic_read_w0(adapter, NETXEN_NIU_GB_PAUSE_CTL, &val); 581 val = NXRD32(adapter, NETXEN_NIU_GB_PAUSE_CTL);
565 switch (port) { 582 switch (port) {
566 case 0: 583 case 0:
567 if (pause->tx_pause) 584 if (pause->tx_pause)
@@ -589,11 +606,11 @@ netxen_nic_set_pauseparam(struct net_device *dev,
589 netxen_gb_set_gb3_mask(val); 606 netxen_gb_set_gb3_mask(val);
590 break; 607 break;
591 } 608 }
592 netxen_nic_write_w0(adapter, NETXEN_NIU_GB_PAUSE_CTL, val); 609 NXWR32(adapter, NETXEN_NIU_GB_PAUSE_CTL, val);
593 } else if (adapter->ahw.port_type == NETXEN_NIC_XGBE) { 610 } else if (adapter->ahw.port_type == NETXEN_NIC_XGBE) {
594 if ((port < 0) || (port > NETXEN_NIU_MAX_XG_PORTS)) 611 if ((port < 0) || (port > NETXEN_NIU_MAX_XG_PORTS))
595 return -EIO; 612 return -EIO;
596 netxen_nic_read_w0(adapter, NETXEN_NIU_XG_PAUSE_CTL, &val); 613 val = NXRD32(adapter, NETXEN_NIU_XG_PAUSE_CTL);
597 if (port == 0) { 614 if (port == 0) {
598 if (pause->tx_pause) 615 if (pause->tx_pause)
599 netxen_xg_unset_xg0_mask(val); 616 netxen_xg_unset_xg0_mask(val);
@@ -605,7 +622,7 @@ netxen_nic_set_pauseparam(struct net_device *dev,
605 else 622 else
606 netxen_xg_set_xg1_mask(val); 623 netxen_xg_set_xg1_mask(val);
607 } 624 }
608 netxen_nic_write_w0(adapter, NETXEN_NIU_XG_PAUSE_CTL, val); 625 NXWR32(adapter, NETXEN_NIU_XG_PAUSE_CTL, val);
609 } else { 626 } else {
610 printk(KERN_ERR "%s: Unknown board type: %x\n", 627 printk(KERN_ERR "%s: Unknown board type: %x\n",
611 netxen_nic_driver_name, 628 netxen_nic_driver_name,
@@ -619,14 +636,14 @@ static int netxen_nic_reg_test(struct net_device *dev)
619 struct netxen_adapter *adapter = netdev_priv(dev); 636 struct netxen_adapter *adapter = netdev_priv(dev);
620 u32 data_read, data_written; 637 u32 data_read, data_written;
621 638
622 netxen_nic_read_w0(adapter, NETXEN_PCIX_PH_REG(0), &data_read); 639 data_read = NXRD32(adapter, NETXEN_PCIX_PH_REG(0));
623 if ((data_read & 0xffff) != PHAN_VENDOR_ID) 640 if ((data_read & 0xffff) != PHAN_VENDOR_ID)
624 return 1; 641 return 1;
625 642
626 data_written = (u32)0xa5a5a5a5; 643 data_written = (u32)0xa5a5a5a5;
627 644
628 netxen_nic_reg_write(adapter, CRB_SCRATCHPAD_TEST, data_written); 645 NXWR32(adapter, CRB_SCRATCHPAD_TEST, data_written);
629 data_read = adapter->pci_read_normalize(adapter, CRB_SCRATCHPAD_TEST); 646 data_read = NXRD32(adapter, CRB_SCRATCHPAD_TEST);
630 if (data_written != data_read) 647 if (data_written != data_read)
631 return 1; 648 return 1;
632 649
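netxen_nic_reg_test() remains a two-step sanity check: confirm the vendor ID is visible in a known register, then write a pattern to a scratchpad register and read it back through the same NXRD32()/NXWR32() path. A userspace model of the check, with regs[] standing in for CRB space and hypothetical offsets:

/* Write-then-read-back register self-test, modelled in userspace. */
#include <stdio.h>
#include <stdint.h>

#define PH_REG       0
#define SCRATCH_REG  1
#define VENDOR_ID    0x4040U

static uint32_t regs[2] = { 0x12344040U, 0 };

static int reg_test(void)
{
	uint32_t written, read;

	if ((regs[PH_REG] & 0xffff) != VENDOR_ID)
		return 1;                       /* can't even see the device */

	written = 0xa5a5a5a5U;
	regs[SCRATCH_REG] = written;            /* NXWR32(..., scratchpad, ...) */
	read = regs[SCRATCH_REG];               /* NXRD32(..., scratchpad) */

	return (written != read);               /* non-zero means failure */
}

int main(void)
{
	printf("reg test %s\n", reg_test() ? "failed" : "passed");
	return 0;
}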
@@ -743,11 +760,11 @@ netxen_nic_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
743 if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) 760 if (NX_IS_REVISION_P2(adapter->ahw.revision_id))
744 return; 761 return;
745 762
746 wol_cfg = netxen_nic_reg_read(adapter, NETXEN_WOL_CONFIG_NV); 763 wol_cfg = NXRD32(adapter, NETXEN_WOL_CONFIG_NV);
747 if (wol_cfg & (1UL << adapter->portnum)) 764 if (wol_cfg & (1UL << adapter->portnum))
748 wol->supported |= WAKE_MAGIC; 765 wol->supported |= WAKE_MAGIC;
749 766
750 wol_cfg = netxen_nic_reg_read(adapter, NETXEN_WOL_CONFIG); 767 wol_cfg = NXRD32(adapter, NETXEN_WOL_CONFIG);
751 if (wol_cfg & (1UL << adapter->portnum)) 768 if (wol_cfg & (1UL << adapter->portnum))
752 wol->wolopts |= WAKE_MAGIC; 769 wol->wolopts |= WAKE_MAGIC;
753} 770}
@@ -764,16 +781,16 @@ netxen_nic_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
764 if (wol->wolopts & ~WAKE_MAGIC) 781 if (wol->wolopts & ~WAKE_MAGIC)
765 return -EOPNOTSUPP; 782 return -EOPNOTSUPP;
766 783
767 wol_cfg = netxen_nic_reg_read(adapter, NETXEN_WOL_CONFIG_NV); 784 wol_cfg = NXRD32(adapter, NETXEN_WOL_CONFIG_NV);
768 if (!(wol_cfg & (1 << adapter->portnum))) 785 if (!(wol_cfg & (1 << adapter->portnum)))
769 return -EOPNOTSUPP; 786 return -EOPNOTSUPP;
770 787
771 wol_cfg = netxen_nic_reg_read(adapter, NETXEN_WOL_CONFIG); 788 wol_cfg = NXRD32(adapter, NETXEN_WOL_CONFIG);
772 if (wol->wolopts & WAKE_MAGIC) 789 if (wol->wolopts & WAKE_MAGIC)
773 wol_cfg |= 1UL << adapter->portnum; 790 wol_cfg |= 1UL << adapter->portnum;
774 else 791 else
775 wol_cfg &= ~(1UL << adapter->portnum); 792 wol_cfg &= ~(1UL << adapter->portnum);
776 netxen_nic_reg_write(adapter, NETXEN_WOL_CONFIG, wol_cfg); 793 NXWR32(adapter, NETXEN_WOL_CONFIG, wol_cfg);
777 794
778 return 0; 795 return 0;
779} 796}
diff --git a/drivers/net/netxen/netxen_nic_hdr.h b/drivers/net/netxen/netxen_nic_hdr.h
index 016c62129c76..7f0ddbfa7b28 100644
--- a/drivers/net/netxen/netxen_nic_hdr.h
+++ b/drivers/net/netxen/netxen_nic_hdr.h
@@ -31,16 +31,8 @@
31#ifndef __NETXEN_NIC_HDR_H_ 31#ifndef __NETXEN_NIC_HDR_H_
32#define __NETXEN_NIC_HDR_H_ 32#define __NETXEN_NIC_HDR_H_
33 33
34#include <linux/module.h>
35#include <linux/kernel.h> 34#include <linux/kernel.h>
36#include <linux/spinlock.h>
37#include <asm/irq.h>
38#include <linux/init.h>
39#include <linux/errno.h>
40#include <linux/pci.h>
41#include <linux/types.h> 35#include <linux/types.h>
42#include <asm/uaccess.h>
43#include <asm/string.h> /* for memset */
44 36
45/* 37/*
46 * The basic unit of access when reading/writing control registers. 38 * The basic unit of access when reading/writing control registers.
diff --git a/drivers/net/netxen/netxen_nic_hw.c b/drivers/net/netxen/netxen_nic_hw.c
index 5026811c04ce..3bb2b8c74d92 100644
--- a/drivers/net/netxen/netxen_nic_hw.c
+++ b/drivers/net/netxen/netxen_nic_hw.c
@@ -48,8 +48,49 @@
48#define CRB_HI(off) ((crb_hub_agt[CRB_BLK(off)] << 20) | ((off) & 0xf0000)) 48#define CRB_HI(off) ((crb_hub_agt[CRB_BLK(off)] << 20) | ((off) & 0xf0000))
49#define CRB_INDIRECT_2M (0x1e0000UL) 49#define CRB_INDIRECT_2M (0x1e0000UL)
50 50
51#ifndef readq
52static inline u64 readq(void __iomem *addr)
53{
54 return readl(addr) | (((u64) readl(addr + 4)) << 32LL);
55}
56#endif
57
58#ifndef writeq
59static inline void writeq(u64 val, void __iomem *addr)
60{
61 writel(((u32) (val)), (addr));
62 writel(((u32) (val >> 32)), (addr + 4));
63}
64#endif
65
66#define ADDR_IN_RANGE(addr, low, high) \
67 (((addr) < (high)) && ((addr) >= (low)))
68
69#define PCI_OFFSET_FIRST_RANGE(adapter, off) \
70 ((adapter)->ahw.pci_base0 + (off))
71#define PCI_OFFSET_SECOND_RANGE(adapter, off) \
72 ((adapter)->ahw.pci_base1 + (off) - SECOND_PAGE_GROUP_START)
73#define PCI_OFFSET_THIRD_RANGE(adapter, off) \
74 ((adapter)->ahw.pci_base2 + (off) - THIRD_PAGE_GROUP_START)
75
76static void __iomem *pci_base_offset(struct netxen_adapter *adapter,
77 unsigned long off)
78{
79 if (ADDR_IN_RANGE(off, FIRST_PAGE_GROUP_START, FIRST_PAGE_GROUP_END))
80 return PCI_OFFSET_FIRST_RANGE(adapter, off);
81
82 if (ADDR_IN_RANGE(off, SECOND_PAGE_GROUP_START, SECOND_PAGE_GROUP_END))
83 return PCI_OFFSET_SECOND_RANGE(adapter, off);
84
85 if (ADDR_IN_RANGE(off, THIRD_PAGE_GROUP_START, THIRD_PAGE_GROUP_END))
86 return PCI_OFFSET_THIRD_RANGE(adapter, off);
87
88 return NULL;
89}
90
51#define CRB_WIN_LOCK_TIMEOUT 100000000 91#define CRB_WIN_LOCK_TIMEOUT 100000000
52static crb_128M_2M_block_map_t crb_128M_2M_map[64] = { 92static crb_128M_2M_block_map_t
93crb_128M_2M_map[64] __cacheline_aligned_in_smp = {
53 {{{0, 0, 0, 0} } }, /* 0: PCI */ 94 {{{0, 0, 0, 0} } }, /* 0: PCI */
54 {{{1, 0x0100000, 0x0102000, 0x120000}, /* 1: PCIE */ 95 {{{1, 0x0100000, 0x0102000, 0x120000}, /* 1: PCIE */
55 {1, 0x0110000, 0x0120000, 0x130000}, 96 {1, 0x0110000, 0x0120000, 0x130000},
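The readq()/writeq() fallbacks added above compose a 64-bit MMIO access from two 32-bit ones, low word first, for architectures that do not provide them natively. The same arithmetic, modelled on plain memory rather than device registers:

/* Compose 64-bit accesses from two 32-bit ones, as the #ifndef
 * readq/writeq fallbacks do; memcpy stands in for readl/writel. */
#include <stdio.h>
#include <stdint.h>
#include <string.h>

static uint32_t rd32(const void *addr)
{
	uint32_t v;
	memcpy(&v, addr, 4);
	return v;
}

static void wr32(uint32_t val, void *addr)
{
	memcpy(addr, &val, 4);
}

static uint64_t rdq(const void *addr)
{
	return rd32(addr) | ((uint64_t)rd32((const char *)addr + 4) << 32);
}

static void wrq(uint64_t val, void *addr)
{
	wr32((uint32_t)val, addr);                   /* low word first */
	wr32((uint32_t)(val >> 32), (char *)addr + 4);
}

int main(void)
{
	unsigned char buf[8];

	wrq(0x1122334455667788ULL, buf);
	printf("0x%016llx\n", (unsigned long long)rdq(buf));
	return 0;
}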
@@ -279,18 +320,8 @@ static unsigned crb_hub_agt[64] =
279 320
280/* PCI Windowing for DDR regions. */ 321/* PCI Windowing for DDR regions. */
281 322
282#define ADDR_IN_RANGE(addr, low, high) \
283 (((addr) <= (high)) && ((addr) >= (low)))
284
285#define NETXEN_WINDOW_ONE 0x2000000 /*CRB Window: bit 25 of CRB address */ 323#define NETXEN_WINDOW_ONE 0x2000000 /*CRB Window: bit 25 of CRB address */
286 324
287#define NETXEN_NIC_ZERO_PAUSE_ADDR 0ULL
288#define NETXEN_NIC_UNIT_PAUSE_ADDR 0x200ULL
289#define NETXEN_NIC_EPG_PAUSE_ADDR1 0x2200010000c28001ULL
290#define NETXEN_NIC_EPG_PAUSE_ADDR2 0x0100088866554433ULL
291
292#define NETXEN_NIC_WINDOW_MARGIN 0x100000
293
294int netxen_nic_set_mac(struct net_device *netdev, void *p) 325int netxen_nic_set_mac(struct net_device *netdev, void *p)
295{ 326{
296 struct netxen_adapter *adapter = netdev_priv(netdev); 327 struct netxen_adapter *adapter = netdev_priv(netdev);
@@ -331,22 +362,20 @@ netxen_nic_enable_mcast_filter(struct netxen_adapter *adapter)
331 if (adapter->mc_enabled) 362 if (adapter->mc_enabled)
332 return 0; 363 return 0;
333 364
334 adapter->hw_read_wx(adapter, NETXEN_MAC_ADDR_CNTL_REG, &val, 4); 365 val = NXRD32(adapter, NETXEN_MAC_ADDR_CNTL_REG);
335 val |= (1UL << (28+port)); 366 val |= (1UL << (28+port));
336 adapter->hw_write_wx(adapter, NETXEN_MAC_ADDR_CNTL_REG, &val, 4); 367 NXWR32(adapter, NETXEN_MAC_ADDR_CNTL_REG, val);
337 368
338 /* add broadcast addr to filter */ 369 /* add broadcast addr to filter */
339 val = 0xffffff; 370 val = 0xffffff;
340 netxen_crb_writelit_adapter(adapter, NETXEN_UNICAST_ADDR(port, 0), val); 371 NXWR32(adapter, NETXEN_UNICAST_ADDR(port, 0), val);
341 netxen_crb_writelit_adapter(adapter, 372 NXWR32(adapter, NETXEN_UNICAST_ADDR(port, 0)+4, val);
342 NETXEN_UNICAST_ADDR(port, 0)+4, val);
343 373
344 /* add station addr to filter */ 374 /* add station addr to filter */
345 val = MAC_HI(addr); 375 val = MAC_HI(addr);
346 netxen_crb_writelit_adapter(adapter, NETXEN_UNICAST_ADDR(port, 1), val); 376 NXWR32(adapter, NETXEN_UNICAST_ADDR(port, 1), val);
347 val = MAC_LO(addr); 377 val = MAC_LO(addr);
348 netxen_crb_writelit_adapter(adapter, 378 NXWR32(adapter, NETXEN_UNICAST_ADDR(port, 1)+4, val);
349 NETXEN_UNICAST_ADDR(port, 1)+4, val);
350 379
351 adapter->mc_enabled = 1; 380 adapter->mc_enabled = 1;
352 return 0; 381 return 0;
@@ -362,18 +391,17 @@ netxen_nic_disable_mcast_filter(struct netxen_adapter *adapter)
362 if (!adapter->mc_enabled) 391 if (!adapter->mc_enabled)
363 return 0; 392 return 0;
364 393
365 adapter->hw_read_wx(adapter, NETXEN_MAC_ADDR_CNTL_REG, &val, 4); 394 val = NXRD32(adapter, NETXEN_MAC_ADDR_CNTL_REG);
366 val &= ~(1UL << (28+port)); 395 val &= ~(1UL << (28+port));
367 adapter->hw_write_wx(adapter, NETXEN_MAC_ADDR_CNTL_REG, &val, 4); 396 NXWR32(adapter, NETXEN_MAC_ADDR_CNTL_REG, val);
368 397
369 val = MAC_HI(addr); 398 val = MAC_HI(addr);
370 netxen_crb_writelit_adapter(adapter, NETXEN_UNICAST_ADDR(port, 0), val); 399 NXWR32(adapter, NETXEN_UNICAST_ADDR(port, 0), val);
371 val = MAC_LO(addr); 400 val = MAC_LO(addr);
372 netxen_crb_writelit_adapter(adapter, 401 NXWR32(adapter, NETXEN_UNICAST_ADDR(port, 0)+4, val);
373 NETXEN_UNICAST_ADDR(port, 0)+4, val);
374 402
375 netxen_crb_writelit_adapter(adapter, NETXEN_UNICAST_ADDR(port, 1), 0); 403 NXWR32(adapter, NETXEN_UNICAST_ADDR(port, 1), 0);
376 netxen_crb_writelit_adapter(adapter, NETXEN_UNICAST_ADDR(port, 1)+4, 0); 404 NXWR32(adapter, NETXEN_UNICAST_ADDR(port, 1)+4, 0);
377 405
378 adapter->mc_enabled = 0; 406 adapter->mc_enabled = 0;
379 return 0; 407 return 0;
@@ -389,10 +417,8 @@ netxen_nic_set_mcast_addr(struct netxen_adapter *adapter,
389 lo = MAC_LO(addr); 417 lo = MAC_LO(addr);
390 hi = MAC_HI(addr); 418 hi = MAC_HI(addr);
391 419
392 netxen_crb_writelit_adapter(adapter, 420 NXWR32(adapter, NETXEN_MCAST_ADDR(port, index), hi);
393 NETXEN_MCAST_ADDR(port, index), hi); 421 NXWR32(adapter, NETXEN_MCAST_ADDR(port, index)+4, lo);
394 netxen_crb_writelit_adapter(adapter,
395 NETXEN_MCAST_ADDR(port, index)+4, lo);
396 422
397 return 0; 423 return 0;
398} 424}
@@ -486,45 +512,44 @@ static int nx_p3_nic_add_mac(struct netxen_adapter *adapter,
486 512
487static int 513static int
488netxen_send_cmd_descs(struct netxen_adapter *adapter, 514netxen_send_cmd_descs(struct netxen_adapter *adapter,
489 struct cmd_desc_type0 *cmd_desc_arr, int nr_elements) 515 struct cmd_desc_type0 *cmd_desc_arr, int nr_desc)
490{ 516{
491 uint32_t i, producer; 517 u32 i, producer, consumer;
492 struct netxen_cmd_buffer *pbuf; 518 struct netxen_cmd_buffer *pbuf;
493 struct cmd_desc_type0 *cmd_desc; 519 struct cmd_desc_type0 *cmd_desc;
494 520 struct nx_host_tx_ring *tx_ring;
495 if (nr_elements > MAX_PENDING_DESC_BLOCK_SIZE || nr_elements == 0) {
496 printk(KERN_WARNING "%s: Too many command descriptors in a "
497 "request\n", __func__);
498 return -EINVAL;
499 }
500 521
501 i = 0; 522 i = 0;
502 523
524 tx_ring = &adapter->tx_ring;
503 netif_tx_lock_bh(adapter->netdev); 525 netif_tx_lock_bh(adapter->netdev);
504 526
505 producer = adapter->cmd_producer; 527 producer = tx_ring->producer;
528 consumer = tx_ring->sw_consumer;
529
530 if (nr_desc > find_diff_among(producer, consumer, tx_ring->num_desc)) {
531 netif_tx_unlock_bh(adapter->netdev);
532 return -EBUSY;
533 }
534
506 do { 535 do {
507 cmd_desc = &cmd_desc_arr[i]; 536 cmd_desc = &cmd_desc_arr[i];
508 537
509 pbuf = &adapter->cmd_buf_arr[producer]; 538 pbuf = &tx_ring->cmd_buf_arr[producer];
510 pbuf->skb = NULL; 539 pbuf->skb = NULL;
511 pbuf->frag_count = 0; 540 pbuf->frag_count = 0;
512 541
513 /* adapter->ahw.cmd_desc_head[producer] = *cmd_desc; */ 542 memcpy(&tx_ring->desc_head[producer],
514 memcpy(&adapter->ahw.cmd_desc_head[producer],
515 &cmd_desc_arr[i], sizeof(struct cmd_desc_type0)); 543 &cmd_desc_arr[i], sizeof(struct cmd_desc_type0));
516 544
517 producer = get_next_index(producer, 545 producer = get_next_index(producer, tx_ring->num_desc);
518 adapter->num_txd);
519 i++; 546 i++;
520 547
521 } while (i != nr_elements); 548 } while (i != nr_desc);
522 549
523 adapter->cmd_producer = producer; 550 tx_ring->producer = producer;
524 551
525 /* write producer index to start the xmit */ 552 netxen_nic_update_cmd_producer(adapter, tx_ring, producer);
526
527 netxen_nic_update_cmd_producer(adapter, adapter->cmd_producer);
528 553
529 netif_tx_unlock_bh(adapter->netdev); 554 netif_tx_unlock_bh(adapter->netdev);
530 555
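netxen_send_cmd_descs() now refuses a request that does not fit between the ring's producer and software consumer, and only then copies descriptors and advances the producer with wraparound. A standalone model of that producer/consumer arithmetic; ring_free() and next_index() are hypothetical reimplementations and may differ from the driver's find_diff_among()/get_next_index() in boundary handling:

/* Ring free-space check and producer advance with wraparound. */
#include <stdio.h>

static unsigned int ring_free(unsigned int producer, unsigned int consumer,
			      unsigned int num_desc)
{
	/* slots the producer may still fill before catching the consumer */
	return (producer >= consumer) ?
		(num_desc - producer + consumer) : (consumer - producer);
}

static unsigned int next_index(unsigned int index, unsigned int num_desc)
{
	return (index + 1) % num_desc;          /* wrap at ring end */
}

int main(void)
{
	unsigned int producer = 250, consumer = 10, num_desc = 256;
	unsigned int nr_desc = 8, i;

	if (nr_desc > ring_free(producer, consumer, num_desc)) {
		puts("ring full, try again later (-EBUSY)");
		return 1;
	}

	for (i = 0; i < nr_desc; i++)
		producer = next_index(producer, num_desc);

	printf("new producer = %u\n", producer);
	return 0;
}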
@@ -717,6 +742,28 @@ int netxen_config_rss(struct netxen_adapter *adapter, int enable)
717 return rv; 742 return rv;
718} 743}
719 744
745int netxen_linkevent_request(struct netxen_adapter *adapter, int enable)
746{
747 nx_nic_req_t req;
748 u64 word;
749 int rv;
750
751 memset(&req, 0, sizeof(nx_nic_req_t));
752 req.qhdr = cpu_to_le64(NX_HOST_REQUEST << 23);
753
754 word = NX_NIC_H2C_OPCODE_GET_LINKEVENT | ((u64)adapter->portnum << 16);
755 req.req_hdr = cpu_to_le64(word);
756 req.words[0] = cpu_to_le64(enable);
757
758 rv = netxen_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
759 if (rv != 0) {
760 printk(KERN_ERR "%s: could not configure link notification\n",
761 adapter->netdev->name);
762 }
763
764 return rv;
765}
766
720/* 767/*
721 * netxen_nic_change_mtu - Change the Maximum Transfer Unit 768 * netxen_nic_change_mtu - Change the Maximum Transfer Unit
722 * @returns 0 on success, negative on failure 769 * @returns 0 on success, negative on failure
@@ -812,8 +859,8 @@ int netxen_p3_get_mac_addr(struct netxen_adapter *adapter, __le64 *mac)
812 crbaddr = CRB_MAC_BLOCK_START + 859 crbaddr = CRB_MAC_BLOCK_START +
813 (4 * ((pci_func/2) * 3)) + (4 * (pci_func & 1)); 860 (4 * ((pci_func/2) * 3)) + (4 * (pci_func & 1));
814 861
815 adapter->hw_read_wx(adapter, crbaddr, &mac_lo, 4); 862 mac_lo = NXRD32(adapter, crbaddr);
816 adapter->hw_read_wx(adapter, crbaddr+4, &mac_hi, 4); 863 mac_hi = NXRD32(adapter, crbaddr+4);
817 864
818 if (pci_func & 1) 865 if (pci_func & 1)
819 *mac = le64_to_cpu((mac_lo >> 16) | ((u64)mac_hi << 16)); 866 *mac = le64_to_cpu((mac_lo >> 16) | ((u64)mac_hi << 16));
@@ -831,8 +878,7 @@ static int crb_win_lock(struct netxen_adapter *adapter)
831 878
832 while (!done) { 879 while (!done) {
833 /* acquire semaphore3 from PCI HW block */ 880 /* acquire semaphore3 from PCI HW block */
834 adapter->hw_read_wx(adapter, 881 done = NXRD32(adapter, NETXEN_PCIE_REG(PCIE_SEM7_LOCK));
835 NETXEN_PCIE_REG(PCIE_SEM7_LOCK), &done, 4);
836 if (done == 1) 882 if (done == 1)
837 break; 883 break;
838 if (timeout >= CRB_WIN_LOCK_TIMEOUT) 884 if (timeout >= CRB_WIN_LOCK_TIMEOUT)
@@ -840,8 +886,7 @@ static int crb_win_lock(struct netxen_adapter *adapter)
840 timeout++; 886 timeout++;
841 udelay(1); 887 udelay(1);
842 } 888 }
843 netxen_crb_writelit_adapter(adapter, 889 NXWR32(adapter, NETXEN_CRB_WIN_LOCK_ID, adapter->portnum);
844 NETXEN_CRB_WIN_LOCK_ID, adapter->portnum);
845 return 0; 890 return 0;
846} 891}
847 892
@@ -849,8 +894,7 @@ static void crb_win_unlock(struct netxen_adapter *adapter)
849{ 894{
850 int val; 895 int val;
851 896
852 adapter->hw_read_wx(adapter, 897 val = NXRD32(adapter, NETXEN_PCIE_REG(PCIE_SEM7_UNLOCK));
853 NETXEN_PCIE_REG(PCIE_SEM7_UNLOCK), &val, 4);
854} 898}
855 899
856/* 900/*
@@ -986,8 +1030,7 @@ netxen_do_load_firmware(struct netxen_adapter *adapter, const char *fwname,
986 dev_info(&pdev->dev, "loading firmware from flash\n"); 1030 dev_info(&pdev->dev, "loading firmware from flash\n");
987 1031
988 if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) 1032 if (NX_IS_REVISION_P2(adapter->ahw.revision_id))
989 adapter->pci_write_normalize(adapter, 1033 NXWR32(adapter, NETXEN_ROMUSB_GLB_CAS_RST, 1);
990 NETXEN_ROMUSB_GLB_CAS_RST, 1);
991 1034
992 if (fw) { 1035 if (fw) {
993 __le64 data; 1036 __le64 data;
@@ -1039,13 +1082,10 @@ netxen_do_load_firmware(struct netxen_adapter *adapter, const char *fwname,
1039 msleep(1); 1082 msleep(1);
1040 1083
1041 if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) 1084 if (NX_IS_REVISION_P3(adapter->ahw.revision_id))
1042 adapter->pci_write_normalize(adapter, 1085 NXWR32(adapter, NETXEN_ROMUSB_GLB_SW_RESET, 0x80001d);
1043 NETXEN_ROMUSB_GLB_SW_RESET, 0x80001d);
1044 else { 1086 else {
1045 adapter->pci_write_normalize(adapter, 1087 NXWR32(adapter, NETXEN_ROMUSB_GLB_CHIP_CLK_CTRL, 0x3fff);
1046 NETXEN_ROMUSB_GLB_CHIP_CLK_CTRL, 0x3fff); 1088 NXWR32(adapter, NETXEN_ROMUSB_GLB_CAS_RST, 0);
1047 adapter->pci_write_normalize(adapter,
1048 NETXEN_ROMUSB_GLB_CAS_RST, 0);
1049 } 1089 }
1050 1090
1051 return 0; 1091 return 0;
@@ -1103,8 +1143,7 @@ netxen_validate_firmware(struct netxen_adapter *adapter, const char *fwname,
1103 if (NETXEN_VERSION_CODE(major, minor, build) > ver) 1143 if (NETXEN_VERSION_CODE(major, minor, build) > ver)
1104 return -EINVAL; 1144 return -EINVAL;
1105 1145
1106 netxen_nic_reg_write(adapter, NETXEN_CAM_RAM(0x1fc), 1146 NXWR32(adapter, NETXEN_CAM_RAM(0x1fc), NETXEN_BDINFO_MAGIC);
1107 NETXEN_BDINFO_MAGIC);
1108 return 0; 1147 return 0;
1109} 1148}
1110 1149
@@ -1132,8 +1171,7 @@ request_mn:
1132 netxen_rom_fast_read(adapter, 1171 netxen_rom_fast_read(adapter,
1133 NX_FW_VERSION_OFFSET, (int *)&flashed_ver); 1172 NX_FW_VERSION_OFFSET, (int *)&flashed_ver);
1134 if (flashed_ver >= NETXEN_VERSION_CODE(4, 0, 220)) { 1173 if (flashed_ver >= NETXEN_VERSION_CODE(4, 0, 220)) {
1135 adapter->hw_read_wx(adapter, 1174 capability = NXRD32(adapter, NX_PEG_TUNE_CAPABILITY);
1136 NX_PEG_TUNE_CAPABILITY, &capability, 4);
1137 if (capability & NX_PEG_TUNE_MN_PRESENT) { 1175 if (capability & NX_PEG_TUNE_MN_PRESENT) {
1138 fw_type = NX_P3_MN_ROMIMAGE; 1176 fw_type = NX_P3_MN_ROMIMAGE;
1139 goto request_fw; 1177 goto request_fw;
@@ -1173,13 +1211,10 @@ load_fw:
1173} 1211}
1174 1212
1175int 1213int
1176netxen_nic_hw_write_wx_128M(struct netxen_adapter *adapter, 1214netxen_nic_hw_write_wx_128M(struct netxen_adapter *adapter, ulong off, u32 data)
1177 ulong off, void *data, int len)
1178{ 1215{
1179 void __iomem *addr; 1216 void __iomem *addr;
1180 1217
1181 BUG_ON(len != 4);
1182
1183 if (ADDR_IN_WINDOW1(off)) { 1218 if (ADDR_IN_WINDOW1(off)) {
1184 addr = NETXEN_CRB_NORMALIZE(adapter, off); 1219 addr = NETXEN_CRB_NORMALIZE(adapter, off);
1185 } else { /* Window 0 */ 1220 } else { /* Window 0 */
@@ -1192,7 +1227,7 @@ netxen_nic_hw_write_wx_128M(struct netxen_adapter *adapter,
1192 return 1; 1227 return 1;
1193 } 1228 }
1194 1229
1195 writel(*(u32 *) data, addr); 1230 writel(data, addr);
1196 1231
1197 if (!ADDR_IN_WINDOW1(off)) 1232 if (!ADDR_IN_WINDOW1(off))
1198 netxen_nic_pci_change_crbwindow_128M(adapter, 1); 1233 netxen_nic_pci_change_crbwindow_128M(adapter, 1);
@@ -1200,13 +1235,11 @@ netxen_nic_hw_write_wx_128M(struct netxen_adapter *adapter,
1200 return 0; 1235 return 0;
1201} 1236}
1202 1237
1203int 1238u32
1204netxen_nic_hw_read_wx_128M(struct netxen_adapter *adapter, 1239netxen_nic_hw_read_wx_128M(struct netxen_adapter *adapter, ulong off)
1205 ulong off, void *data, int len)
1206{ 1240{
1207 void __iomem *addr; 1241 void __iomem *addr;
1208 1242 u32 data;
1209 BUG_ON(len != 4);
1210 1243
1211 if (ADDR_IN_WINDOW1(off)) { /* Window 1 */ 1244 if (ADDR_IN_WINDOW1(off)) { /* Window 1 */
1212 addr = NETXEN_CRB_NORMALIZE(adapter, off); 1245 addr = NETXEN_CRB_NORMALIZE(adapter, off);
@@ -1220,24 +1253,21 @@ netxen_nic_hw_read_wx_128M(struct netxen_adapter *adapter,
1220 return 1; 1253 return 1;
1221 } 1254 }
1222 1255
1223 *(u32 *)data = readl(addr); 1256 data = readl(addr);
1224 1257
1225 if (!ADDR_IN_WINDOW1(off)) 1258 if (!ADDR_IN_WINDOW1(off))
1226 netxen_nic_pci_change_crbwindow_128M(adapter, 1); 1259 netxen_nic_pci_change_crbwindow_128M(adapter, 1);
1227 1260
1228 return 0; 1261 return data;
1229} 1262}
1230 1263
1231int 1264int
1232netxen_nic_hw_write_wx_2M(struct netxen_adapter *adapter, 1265netxen_nic_hw_write_wx_2M(struct netxen_adapter *adapter, ulong off, u32 data)
1233 ulong off, void *data, int len)
1234{ 1266{
1235 unsigned long flags = 0; 1267 unsigned long flags = 0;
1236 int rv; 1268 int rv;
1237 1269
1238 BUG_ON(len != 4); 1270 rv = netxen_nic_pci_get_crb_addr_2M(adapter, &off, 4);
1239
1240 rv = netxen_nic_pci_get_crb_addr_2M(adapter, &off, len);
1241 1271
1242 if (rv == -1) { 1272 if (rv == -1) {
1243 printk(KERN_ERR "%s: invalid offset: 0x%016lx\n", 1273 printk(KERN_ERR "%s: invalid offset: 0x%016lx\n",
@@ -1250,26 +1280,24 @@ netxen_nic_hw_write_wx_2M(struct netxen_adapter *adapter,
1250 write_lock_irqsave(&adapter->adapter_lock, flags); 1280 write_lock_irqsave(&adapter->adapter_lock, flags);
1251 crb_win_lock(adapter); 1281 crb_win_lock(adapter);
1252 netxen_nic_pci_set_crbwindow_2M(adapter, &off); 1282 netxen_nic_pci_set_crbwindow_2M(adapter, &off);
1253 writel(*(uint32_t *)data, (void __iomem *)off); 1283 writel(data, (void __iomem *)off);
1254 crb_win_unlock(adapter); 1284 crb_win_unlock(adapter);
1255 write_unlock_irqrestore(&adapter->adapter_lock, flags); 1285 write_unlock_irqrestore(&adapter->adapter_lock, flags);
1256 } else 1286 } else
1257 writel(*(uint32_t *)data, (void __iomem *)off); 1287 writel(data, (void __iomem *)off);
1258 1288
1259 1289
1260 return 0; 1290 return 0;
1261} 1291}
1262 1292
1263int 1293u32
1264netxen_nic_hw_read_wx_2M(struct netxen_adapter *adapter, 1294netxen_nic_hw_read_wx_2M(struct netxen_adapter *adapter, ulong off)
1265 ulong off, void *data, int len)
1266{ 1295{
1267 unsigned long flags = 0; 1296 unsigned long flags = 0;
1268 int rv; 1297 int rv;
1298 u32 data;
1269 1299
1270 BUG_ON(len != 4); 1300 rv = netxen_nic_pci_get_crb_addr_2M(adapter, &off, 4);
1271
1272 rv = netxen_nic_pci_get_crb_addr_2M(adapter, &off, len);
1273 1301
1274 if (rv == -1) { 1302 if (rv == -1) {
1275 printk(KERN_ERR "%s: invalid offset: 0x%016lx\n", 1303 printk(KERN_ERR "%s: invalid offset: 0x%016lx\n",
@@ -1282,47 +1310,13 @@ netxen_nic_hw_read_wx_2M(struct netxen_adapter *adapter,
1282 write_lock_irqsave(&adapter->adapter_lock, flags); 1310 write_lock_irqsave(&adapter->adapter_lock, flags);
1283 crb_win_lock(adapter); 1311 crb_win_lock(adapter);
1284 netxen_nic_pci_set_crbwindow_2M(adapter, &off); 1312 netxen_nic_pci_set_crbwindow_2M(adapter, &off);
1285 *(uint32_t *)data = readl((void __iomem *)off); 1313 data = readl((void __iomem *)off);
1286 crb_win_unlock(adapter); 1314 crb_win_unlock(adapter);
1287 write_unlock_irqrestore(&adapter->adapter_lock, flags); 1315 write_unlock_irqrestore(&adapter->adapter_lock, flags);
1288 } else 1316 } else
1289 *(uint32_t *)data = readl((void __iomem *)off); 1317 data = readl((void __iomem *)off);
1290
1291 return 0;
1292}
1293 1318
1294void netxen_nic_reg_write(struct netxen_adapter *adapter, u64 off, u32 val) 1319 return data;
1295{
1296 adapter->hw_write_wx(adapter, off, &val, 4);
1297}
1298
1299int netxen_nic_reg_read(struct netxen_adapter *adapter, u64 off)
1300{
1301 int val;
1302 adapter->hw_read_wx(adapter, off, &val, 4);
1303 return val;
1304}
1305
1306/* Change the window to 0, write and change back to window 1. */
1307void netxen_nic_write_w0(struct netxen_adapter *adapter, u32 index, u32 value)
1308{
1309 adapter->hw_write_wx(adapter, index, &value, 4);
1310}
1311
1312/* Change the window to 0, read and change back to window 1. */
1313void netxen_nic_read_w0(struct netxen_adapter *adapter, u32 index, u32 *value)
1314{
1315 adapter->hw_read_wx(adapter, index, value, 4);
1316}
1317
1318void netxen_nic_write_w1(struct netxen_adapter *adapter, u32 index, u32 value)
1319{
1320 adapter->hw_write_wx(adapter, index, &value, 4);
1321}
1322
1323void netxen_nic_read_w1(struct netxen_adapter *adapter, u32 index, u32 *value)
1324{
1325 adapter->hw_read_wx(adapter, index, value, 4);
1326} 1320}
1327 1321
1328/* 1322/*
@@ -1425,17 +1419,6 @@ u32 netxen_nic_pci_read_immediate_128M(struct netxen_adapter *adapter, u64 off)
1425 return readl((void __iomem *)(pci_base_offset(adapter, off))); 1419 return readl((void __iomem *)(pci_base_offset(adapter, off)));
1426} 1420}
1427 1421
1428void netxen_nic_pci_write_normalize_128M(struct netxen_adapter *adapter,
1429 u64 off, u32 data)
1430{
1431 writel(data, NETXEN_CRB_NORMALIZE(adapter, off));
1432}
1433
1434u32 netxen_nic_pci_read_normalize_128M(struct netxen_adapter *adapter, u64 off)
1435{
1436 return readl(NETXEN_CRB_NORMALIZE(adapter, off));
1437}
1438
1439unsigned long 1422unsigned long
1440netxen_nic_pci_set_window_2M(struct netxen_adapter *adapter, 1423netxen_nic_pci_set_window_2M(struct netxen_adapter *adapter,
1441 unsigned long long addr) 1424 unsigned long long addr)
@@ -1447,12 +1430,10 @@ netxen_nic_pci_set_window_2M(struct netxen_adapter *adapter,
1447 /* DDR network side */ 1430 /* DDR network side */
1448 window = MN_WIN(addr); 1431 window = MN_WIN(addr);
1449 adapter->ahw.ddr_mn_window = window; 1432 adapter->ahw.ddr_mn_window = window;
1450 adapter->hw_write_wx(adapter, 1433 NXWR32(adapter, adapter->ahw.mn_win_crb | NETXEN_PCI_CRBSPACE,
1451 adapter->ahw.mn_win_crb | NETXEN_PCI_CRBSPACE, 1434 window);
1452 &window, 4); 1435 win_read = NXRD32(adapter,
1453 adapter->hw_read_wx(adapter, 1436 adapter->ahw.mn_win_crb | NETXEN_PCI_CRBSPACE);
1454 adapter->ahw.mn_win_crb | NETXEN_PCI_CRBSPACE,
1455 &win_read, 4);
1456 if ((win_read << 17) != window) { 1437 if ((win_read << 17) != window) {
1457 printk(KERN_INFO "Written MNwin (0x%x) != " 1438 printk(KERN_INFO "Written MNwin (0x%x) != "
1458 "Read MNwin (0x%x)\n", window, win_read); 1439 "Read MNwin (0x%x)\n", window, win_read);
@@ -1467,12 +1448,10 @@ netxen_nic_pci_set_window_2M(struct netxen_adapter *adapter,
1467 1448
1468 window = OCM_WIN(addr); 1449 window = OCM_WIN(addr);
1469 adapter->ahw.ddr_mn_window = window; 1450 adapter->ahw.ddr_mn_window = window;
1470 adapter->hw_write_wx(adapter, 1451 NXWR32(adapter, adapter->ahw.mn_win_crb | NETXEN_PCI_CRBSPACE,
1471 adapter->ahw.mn_win_crb | NETXEN_PCI_CRBSPACE, 1452 window);
1472 &window, 4); 1453 win_read = NXRD32(adapter,
1473 adapter->hw_read_wx(adapter, 1454 adapter->ahw.mn_win_crb | NETXEN_PCI_CRBSPACE);
1474 adapter->ahw.mn_win_crb | NETXEN_PCI_CRBSPACE,
1475 &win_read, 4);
1476 if ((win_read >> 7) != window) { 1455 if ((win_read >> 7) != window) {
1477 printk(KERN_INFO "%s: Written OCMwin (0x%x) != " 1456 printk(KERN_INFO "%s: Written OCMwin (0x%x) != "
1478 "Read OCMwin (0x%x)\n", 1457 "Read OCMwin (0x%x)\n",
@@ -1485,12 +1464,10 @@ netxen_nic_pci_set_window_2M(struct netxen_adapter *adapter,
1485 /* QDR network side */ 1464 /* QDR network side */
1486 window = MS_WIN(addr); 1465 window = MS_WIN(addr);
1487 adapter->ahw.qdr_sn_window = window; 1466 adapter->ahw.qdr_sn_window = window;
1488 adapter->hw_write_wx(adapter, 1467 NXWR32(adapter, adapter->ahw.ms_win_crb | NETXEN_PCI_CRBSPACE,
1489 adapter->ahw.ms_win_crb | NETXEN_PCI_CRBSPACE, 1468 window);
1490 &window, 4); 1469 win_read = NXRD32(adapter,
1491 adapter->hw_read_wx(adapter, 1470 adapter->ahw.ms_win_crb | NETXEN_PCI_CRBSPACE);
1492 adapter->ahw.ms_win_crb | NETXEN_PCI_CRBSPACE,
1493 &win_read, 4);
1494 if (win_read != window) { 1471 if (win_read != window) {
1495 printk(KERN_INFO "%s: Written MSwin (0x%x) != " 1472 printk(KERN_INFO "%s: Written MSwin (0x%x) != "
1496 "Read MSwin (0x%x)\n", 1473 "Read MSwin (0x%x)\n",
@@ -1936,27 +1913,20 @@ netxen_nic_pci_mem_write_2M(struct netxen_adapter *adapter,
1936 1913
1937 for (i = 0; i < loop; i++) { 1914 for (i = 0; i < loop; i++) {
1938 temp = off8 + (i << 3); 1915 temp = off8 + (i << 3);
1939 adapter->hw_write_wx(adapter, 1916 NXWR32(adapter, mem_crb+MIU_TEST_AGT_ADDR_LO, temp);
1940 mem_crb+MIU_TEST_AGT_ADDR_LO, &temp, 4);
1941 temp = 0; 1917 temp = 0;
1942 adapter->hw_write_wx(adapter, 1918 NXWR32(adapter, mem_crb+MIU_TEST_AGT_ADDR_HI, temp);
1943 mem_crb+MIU_TEST_AGT_ADDR_HI, &temp, 4);
1944 temp = word[i] & 0xffffffff; 1919 temp = word[i] & 0xffffffff;
1945 adapter->hw_write_wx(adapter, 1920 NXWR32(adapter, mem_crb+MIU_TEST_AGT_WRDATA_LO, temp);
1946 mem_crb+MIU_TEST_AGT_WRDATA_LO, &temp, 4);
1947 temp = (word[i] >> 32) & 0xffffffff; 1921 temp = (word[i] >> 32) & 0xffffffff;
1948 adapter->hw_write_wx(adapter, 1922 NXWR32(adapter, mem_crb+MIU_TEST_AGT_WRDATA_HI, temp);
1949 mem_crb+MIU_TEST_AGT_WRDATA_HI, &temp, 4);
1950 temp = MIU_TA_CTL_ENABLE | MIU_TA_CTL_WRITE; 1923 temp = MIU_TA_CTL_ENABLE | MIU_TA_CTL_WRITE;
1951 adapter->hw_write_wx(adapter, 1924 NXWR32(adapter, mem_crb+MIU_TEST_AGT_CTRL, temp);
1952 mem_crb+MIU_TEST_AGT_CTRL, &temp, 4);
1953 temp = MIU_TA_CTL_START | MIU_TA_CTL_ENABLE | MIU_TA_CTL_WRITE; 1925 temp = MIU_TA_CTL_START | MIU_TA_CTL_ENABLE | MIU_TA_CTL_WRITE;
1954 adapter->hw_write_wx(adapter, 1926 NXWR32(adapter, mem_crb+MIU_TEST_AGT_CTRL, temp);
1955 mem_crb+MIU_TEST_AGT_CTRL, &temp, 4);
1956 1927
1957 for (j = 0; j < MAX_CTL_CHECK; j++) { 1928 for (j = 0; j < MAX_CTL_CHECK; j++) {
1958 adapter->hw_read_wx(adapter, 1929 temp = NXRD32(adapter, mem_crb + MIU_TEST_AGT_CTRL);
1959 mem_crb + MIU_TEST_AGT_CTRL, &temp, 4);
1960 if ((temp & MIU_TA_CTL_BUSY) == 0) 1930 if ((temp & MIU_TA_CTL_BUSY) == 0)
1961 break; 1931 break;
1962 } 1932 }
@@ -2013,21 +1983,16 @@ netxen_nic_pci_mem_read_2M(struct netxen_adapter *adapter,
2013 1983
2014 for (i = 0; i < loop; i++) { 1984 for (i = 0; i < loop; i++) {
2015 temp = off8 + (i << 3); 1985 temp = off8 + (i << 3);
2016 adapter->hw_write_wx(adapter, 1986 NXWR32(adapter, mem_crb + MIU_TEST_AGT_ADDR_LO, temp);
2017 mem_crb + MIU_TEST_AGT_ADDR_LO, &temp, 4);
2018 temp = 0; 1987 temp = 0;
2019 adapter->hw_write_wx(adapter, 1988 NXWR32(adapter, mem_crb + MIU_TEST_AGT_ADDR_HI, temp);
2020 mem_crb + MIU_TEST_AGT_ADDR_HI, &temp, 4);
2021 temp = MIU_TA_CTL_ENABLE; 1989 temp = MIU_TA_CTL_ENABLE;
2022 adapter->hw_write_wx(adapter, 1990 NXWR32(adapter, mem_crb + MIU_TEST_AGT_CTRL, temp);
2023 mem_crb + MIU_TEST_AGT_CTRL, &temp, 4);
2024 temp = MIU_TA_CTL_START | MIU_TA_CTL_ENABLE; 1991 temp = MIU_TA_CTL_START | MIU_TA_CTL_ENABLE;
2025 adapter->hw_write_wx(adapter, 1992 NXWR32(adapter, mem_crb + MIU_TEST_AGT_CTRL, temp);
2026 mem_crb + MIU_TEST_AGT_CTRL, &temp, 4);
2027 1993
2028 for (j = 0; j < MAX_CTL_CHECK; j++) { 1994 for (j = 0; j < MAX_CTL_CHECK; j++) {
2029 adapter->hw_read_wx(adapter, 1995 temp = NXRD32(adapter, mem_crb + MIU_TEST_AGT_CTRL);
2030 mem_crb + MIU_TEST_AGT_CTRL, &temp, 4);
2031 if ((temp & MIU_TA_CTL_BUSY) == 0) 1996 if ((temp & MIU_TA_CTL_BUSY) == 0)
2032 break; 1997 break;
2033 } 1998 }
@@ -2042,8 +2007,8 @@ netxen_nic_pci_mem_read_2M(struct netxen_adapter *adapter,
2042 start = off0[i] >> 2; 2007 start = off0[i] >> 2;
2043 end = (off0[i] + sz[i] - 1) >> 2; 2008 end = (off0[i] + sz[i] - 1) >> 2;
2044 for (k = start; k <= end; k++) { 2009 for (k = start; k <= end; k++) {
2045 adapter->hw_read_wx(adapter, 2010 temp = NXRD32(adapter,
2046 mem_crb + MIU_TEST_AGT_RDDATA(k), &temp, 4); 2011 mem_crb + MIU_TEST_AGT_RDDATA(k));
2047 word[i] |= ((uint64_t)temp << (32 * k)); 2012 word[i] |= ((uint64_t)temp << (32 * k));
2048 } 2013 }
2049 } 2014 }
@@ -2086,29 +2051,14 @@ netxen_nic_pci_mem_read_2M(struct netxen_adapter *adapter,
2086int netxen_nic_pci_write_immediate_2M(struct netxen_adapter *adapter, 2051int netxen_nic_pci_write_immediate_2M(struct netxen_adapter *adapter,
2087 u64 off, u32 data) 2052 u64 off, u32 data)
2088{ 2053{
2089 adapter->hw_write_wx(adapter, off, &data, 4); 2054 NXWR32(adapter, off, data);
2090 2055
2091 return 0; 2056 return 0;
2092} 2057}
2093 2058
2094u32 netxen_nic_pci_read_immediate_2M(struct netxen_adapter *adapter, u64 off) 2059u32 netxen_nic_pci_read_immediate_2M(struct netxen_adapter *adapter, u64 off)
2095{ 2060{
2096 u32 temp; 2061 return NXRD32(adapter, off);
2097 adapter->hw_read_wx(adapter, off, &temp, 4);
2098 return temp;
2099}
2100
2101void netxen_nic_pci_write_normalize_2M(struct netxen_adapter *adapter,
2102 u64 off, u32 data)
2103{
2104 adapter->hw_write_wx(adapter, off, &data, 4);
2105}
2106
2107u32 netxen_nic_pci_read_normalize_2M(struct netxen_adapter *adapter, u64 off)
2108{
2109 u32 temp;
2110 adapter->hw_read_wx(adapter, off, &temp, 4);
2111 return temp;
2112} 2062}
2113 2063
2114int netxen_nic_get_board_info(struct netxen_adapter *adapter) 2064int netxen_nic_get_board_info(struct netxen_adapter *adapter)
@@ -2142,13 +2092,12 @@ int netxen_nic_get_board_info(struct netxen_adapter *adapter)
2142 adapter->ahw.board_type = board_type; 2092 adapter->ahw.board_type = board_type;
2143 2093
2144 if (board_type == NETXEN_BRDTYPE_P3_4_GB_MM) { 2094 if (board_type == NETXEN_BRDTYPE_P3_4_GB_MM) {
2145 u32 gpio = netxen_nic_reg_read(adapter, 2095 u32 gpio = NXRD32(adapter, NETXEN_ROMUSB_GLB_PAD_GPIO_I);
2146 NETXEN_ROMUSB_GLB_PAD_GPIO_I);
2147 if ((gpio & 0x8000) == 0) 2096 if ((gpio & 0x8000) == 0)
2148 board_type = NETXEN_BRDTYPE_P3_10G_TP; 2097 board_type = NETXEN_BRDTYPE_P3_10G_TP;
2149 } 2098 }
2150 2099
2151 switch ((netxen_brdtype_t)board_type) { 2100 switch (board_type) {
2152 case NETXEN_BRDTYPE_P2_SB35_4G: 2101 case NETXEN_BRDTYPE_P2_SB35_4G:
2153 adapter->ahw.port_type = NETXEN_NIC_GBE; 2102 adapter->ahw.port_type = NETXEN_NIC_GBE;
2154 break; 2103 break;
@@ -2195,8 +2144,7 @@ int netxen_nic_get_board_info(struct netxen_adapter *adapter)
2195int netxen_nic_set_mtu_gb(struct netxen_adapter *adapter, int new_mtu) 2144int netxen_nic_set_mtu_gb(struct netxen_adapter *adapter, int new_mtu)
2196{ 2145{
2197 new_mtu += MTU_FUDGE_FACTOR; 2146 new_mtu += MTU_FUDGE_FACTOR;
2198 netxen_nic_write_w0(adapter, 2147 NXWR32(adapter, NETXEN_NIU_GB_MAX_FRAME_SIZE(adapter->physical_port),
2199 NETXEN_NIU_GB_MAX_FRAME_SIZE(adapter->physical_port),
2200 new_mtu); 2148 new_mtu);
2201 return 0; 2149 return 0;
2202} 2150}
@@ -2205,21 +2153,12 @@ int netxen_nic_set_mtu_xgb(struct netxen_adapter *adapter, int new_mtu)
2205{ 2153{
2206 new_mtu += MTU_FUDGE_FACTOR; 2154 new_mtu += MTU_FUDGE_FACTOR;
2207 if (adapter->physical_port == 0) 2155 if (adapter->physical_port == 0)
2208 netxen_nic_write_w0(adapter, NETXEN_NIU_XGE_MAX_FRAME_SIZE, 2156 NXWR32(adapter, NETXEN_NIU_XGE_MAX_FRAME_SIZE, new_mtu);
2209 new_mtu);
2210 else 2157 else
2211 netxen_nic_write_w0(adapter, NETXEN_NIU_XG1_MAX_FRAME_SIZE, 2158 NXWR32(adapter, NETXEN_NIU_XG1_MAX_FRAME_SIZE, new_mtu);
2212 new_mtu);
2213 return 0; 2159 return 0;
2214} 2160}
2215 2161
2216void
2217netxen_crb_writelit_adapter(struct netxen_adapter *adapter,
2218 unsigned long off, int data)
2219{
2220 adapter->hw_write_wx(adapter, off, &data, 4);
2221}
2222
2223void netxen_nic_set_link_parameters(struct netxen_adapter *adapter) 2162void netxen_nic_set_link_parameters(struct netxen_adapter *adapter)
2224{ 2163{
2225 __u32 status; 2164 __u32 status;
@@ -2234,8 +2173,7 @@ void netxen_nic_set_link_parameters(struct netxen_adapter *adapter)
2234 } 2173 }
2235 2174
2236 if (adapter->ahw.port_type == NETXEN_NIC_GBE) { 2175 if (adapter->ahw.port_type == NETXEN_NIC_GBE) {
2237 adapter->hw_read_wx(adapter, 2176 port_mode = NXRD32(adapter, NETXEN_PORT_MODE_ADDR);
2238 NETXEN_PORT_MODE_ADDR, &port_mode, 4);
2239 if (port_mode == NETXEN_PORT_MODE_802_3_AP) { 2177 if (port_mode == NETXEN_PORT_MODE_802_3_AP) {
2240 adapter->link_speed = SPEED_1000; 2178 adapter->link_speed = SPEED_1000;
2241 adapter->link_duplex = DUPLEX_FULL; 2179 adapter->link_duplex = DUPLEX_FULL;
@@ -2312,9 +2250,9 @@ void netxen_nic_get_firmware_info(struct netxen_adapter *adapter)
2312 addr += sizeof(u32); 2250 addr += sizeof(u32);
2313 } 2251 }
2314 2252
2315 adapter->hw_read_wx(adapter, NETXEN_FW_VERSION_MAJOR, &fw_major, 4); 2253 fw_major = NXRD32(adapter, NETXEN_FW_VERSION_MAJOR);
2316 adapter->hw_read_wx(adapter, NETXEN_FW_VERSION_MINOR, &fw_minor, 4); 2254 fw_minor = NXRD32(adapter, NETXEN_FW_VERSION_MINOR);
2317 adapter->hw_read_wx(adapter, NETXEN_FW_VERSION_SUB, &fw_build, 4); 2255 fw_build = NXRD32(adapter, NETXEN_FW_VERSION_SUB);
2318 2256
2319 adapter->fw_major = fw_major; 2257 adapter->fw_major = fw_major;
2320 adapter->fw_version = NETXEN_VERSION_CODE(fw_major, fw_minor, fw_build); 2258 adapter->fw_version = NETXEN_VERSION_CODE(fw_major, fw_minor, fw_build);
@@ -2337,8 +2275,7 @@ void netxen_nic_get_firmware_info(struct netxen_adapter *adapter)
2337 fw_major, fw_minor, fw_build); 2275 fw_major, fw_minor, fw_build);
2338 2276
2339 if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) { 2277 if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) {
2340 adapter->hw_read_wx(adapter, 2278 i = NXRD32(adapter, NETXEN_MIU_MN_CONTROL);
2341 NETXEN_MIU_MN_CONTROL, &i, 4);
2342 adapter->ahw.cut_through = (i & 0x4) ? 1 : 0; 2279 adapter->ahw.cut_through = (i & 0x4) ? 1 : 0;
2343 dev_info(&pdev->dev, "firmware running in %s mode\n", 2280 dev_info(&pdev->dev, "firmware running in %s mode\n",
2344 adapter->ahw.cut_through ? "cut-through" : "legacy"); 2281 adapter->ahw.cut_through ? "cut-through" : "legacy");
@@ -2353,9 +2290,9 @@ netxen_nic_wol_supported(struct netxen_adapter *adapter)
2353 if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) 2290 if (NX_IS_REVISION_P2(adapter->ahw.revision_id))
2354 return 0; 2291 return 0;
2355 2292
2356 wol_cfg = netxen_nic_reg_read(adapter, NETXEN_WOL_CONFIG_NV); 2293 wol_cfg = NXRD32(adapter, NETXEN_WOL_CONFIG_NV);
2357 if (wol_cfg & (1UL << adapter->portnum)) { 2294 if (wol_cfg & (1UL << adapter->portnum)) {
2358 wol_cfg = netxen_nic_reg_read(adapter, NETXEN_WOL_CONFIG); 2295 wol_cfg = NXRD32(adapter, NETXEN_WOL_CONFIG);
2359 if (wol_cfg & (1 << adapter->portnum)) 2296 if (wol_cfg & (1 << adapter->portnum))
2360 return 1; 2297 return 1;
2361 } 2298 }
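Note on the conversion above: the old adapter->hw_read_wx()/hw_write_wx() calls took a data pointer and a hard-coded length of 4, while the replacement NXRD32()/NXWR32() helpers take or return the u32 value directly. Their definitions are not shown in this section; judging from the changed function signatures (u32 netxen_nic_hw_read_wx_*(adapter, off), int netxen_nic_hw_write_wx_*(adapter, off, data)), they are presumably thin wrappers over the same per-chip hooks, roughly as sketched here. This is an inference from the diff, not the actual macro bodies.

/*
 * Assumed shape of the new CRB register accessors, inferred from the
 * signature changes in this patch; the real definitions live outside
 * the hunks shown in this section.
 */
#define NXRD32(adapter, off) \
	((adapter)->hw_read_wx(adapter, off))
#define NXWR32(adapter, off, val) \
	((adapter)->hw_write_wx(adapter, off, val))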
diff --git a/drivers/net/netxen/netxen_nic_hw.h b/drivers/net/netxen/netxen_nic_hw.h
index 04b47a7993cd..f20c96591a87 100644
--- a/drivers/net/netxen/netxen_nic_hw.h
+++ b/drivers/net/netxen/netxen_nic_hw.h
@@ -36,35 +36,15 @@
36/* Hardware memory size of 128 meg */ 36/* Hardware memory size of 128 meg */
37#define NETXEN_MEMADDR_MAX (128 * 1024 * 1024) 37#define NETXEN_MEMADDR_MAX (128 * 1024 * 1024)
38 38
39#ifndef readq
40static inline u64 readq(void __iomem * addr)
41{
42 return readl(addr) | (((u64) readl(addr + 4)) << 32LL);
43}
44#endif
45
46#ifndef writeq
47static inline void writeq(u64 val, void __iomem * addr)
48{
49 writel(((u32) (val)), (addr));
50 writel(((u32) (val >> 32)), (addr + 4));
51}
52#endif
53
54struct netxen_adapter; 39struct netxen_adapter;
55 40
56#define NETXEN_PCI_MAPSIZE_BYTES (NETXEN_PCI_MAPSIZE << 20) 41#define NETXEN_PCI_MAPSIZE_BYTES (NETXEN_PCI_MAPSIZE << 20)
57 42
58struct netxen_port;
59void netxen_nic_set_link_parameters(struct netxen_adapter *adapter); 43void netxen_nic_set_link_parameters(struct netxen_adapter *adapter);
60 44
61typedef u8 netxen_ethernet_macaddr_t[6]; 45typedef u8 netxen_ethernet_macaddr_t[6];
62 46
63/* Nibble or Byte mode for phy interface (GbE mode only) */ 47/* Nibble or Byte mode for phy interface (GbE mode only) */
64typedef enum {
65 NETXEN_NIU_10_100_MB = 0,
66 NETXEN_NIU_1000_MB
67} netxen_niu_gbe_ifmode_t;
68 48
69#define _netxen_crb_get_bit(var, bit) ((var >> bit) & 0x1) 49#define _netxen_crb_get_bit(var, bit) ((var >> bit) & 0x1)
70 50
@@ -222,30 +202,28 @@ typedef enum {
222/* 202/*
223 * PHY-Specific MII control/status registers. 203 * PHY-Specific MII control/status registers.
224 */ 204 */
225typedef enum { 205#define NETXEN_NIU_GB_MII_MGMT_ADDR_CONTROL 0
226 NETXEN_NIU_GB_MII_MGMT_ADDR_CONTROL = 0, 206#define NETXEN_NIU_GB_MII_MGMT_ADDR_STATUS 1
227 NETXEN_NIU_GB_MII_MGMT_ADDR_STATUS = 1, 207#define NETXEN_NIU_GB_MII_MGMT_ADDR_PHY_ID_0 2
228 NETXEN_NIU_GB_MII_MGMT_ADDR_PHY_ID_0 = 2, 208#define NETXEN_NIU_GB_MII_MGMT_ADDR_PHY_ID_1 3
229 NETXEN_NIU_GB_MII_MGMT_ADDR_PHY_ID_1 = 3, 209#define NETXEN_NIU_GB_MII_MGMT_ADDR_AUTONEG 4
230 NETXEN_NIU_GB_MII_MGMT_ADDR_AUTONEG = 4, 210#define NETXEN_NIU_GB_MII_MGMT_ADDR_LNKPART 5
231 NETXEN_NIU_GB_MII_MGMT_ADDR_LNKPART = 5, 211#define NETXEN_NIU_GB_MII_MGMT_ADDR_AUTONEG_MORE 6
232 NETXEN_NIU_GB_MII_MGMT_ADDR_AUTONEG_MORE = 6, 212#define NETXEN_NIU_GB_MII_MGMT_ADDR_NEXTPAGE_XMIT 7
233 NETXEN_NIU_GB_MII_MGMT_ADDR_NEXTPAGE_XMIT = 7, 213#define NETXEN_NIU_GB_MII_MGMT_ADDR_LNKPART_NEXTPAGE 8
234 NETXEN_NIU_GB_MII_MGMT_ADDR_LNKPART_NEXTPAGE = 8, 214#define NETXEN_NIU_GB_MII_MGMT_ADDR_1000BT_CONTROL 9
235 NETXEN_NIU_GB_MII_MGMT_ADDR_1000BT_CONTROL = 9, 215#define NETXEN_NIU_GB_MII_MGMT_ADDR_1000BT_STATUS 10
236 NETXEN_NIU_GB_MII_MGMT_ADDR_1000BT_STATUS = 10, 216#define NETXEN_NIU_GB_MII_MGMT_ADDR_EXTENDED_STATUS 15
237 NETXEN_NIU_GB_MII_MGMT_ADDR_EXTENDED_STATUS = 15, 217#define NETXEN_NIU_GB_MII_MGMT_ADDR_PHY_CONTROL 16
238 NETXEN_NIU_GB_MII_MGMT_ADDR_PHY_CONTROL = 16, 218#define NETXEN_NIU_GB_MII_MGMT_ADDR_PHY_STATUS 17
239 NETXEN_NIU_GB_MII_MGMT_ADDR_PHY_STATUS = 17, 219#define NETXEN_NIU_GB_MII_MGMT_ADDR_INT_ENABLE 18
240 NETXEN_NIU_GB_MII_MGMT_ADDR_INT_ENABLE = 18, 220#define NETXEN_NIU_GB_MII_MGMT_ADDR_INT_STATUS 19
241 NETXEN_NIU_GB_MII_MGMT_ADDR_INT_STATUS = 19, 221#define NETXEN_NIU_GB_MII_MGMT_ADDR_PHY_CONTROL_MORE 20
242 NETXEN_NIU_GB_MII_MGMT_ADDR_PHY_CONTROL_MORE = 20, 222#define NETXEN_NIU_GB_MII_MGMT_ADDR_RECV_ERROR_COUNT 21
243 NETXEN_NIU_GB_MII_MGMT_ADDR_RECV_ERROR_COUNT = 21, 223#define NETXEN_NIU_GB_MII_MGMT_ADDR_LED_CONTROL 24
244 NETXEN_NIU_GB_MII_MGMT_ADDR_LED_CONTROL = 24, 224#define NETXEN_NIU_GB_MII_MGMT_ADDR_LED_OVERRIDE 25
245 NETXEN_NIU_GB_MII_MGMT_ADDR_LED_OVERRIDE = 25, 225#define NETXEN_NIU_GB_MII_MGMT_ADDR_PHY_CONTROL_MORE_YET 26
246 NETXEN_NIU_GB_MII_MGMT_ADDR_PHY_CONTROL_MORE_YET = 26, 226#define NETXEN_NIU_GB_MII_MGMT_ADDR_PHY_STATUS_MORE 27
247 NETXEN_NIU_GB_MII_MGMT_ADDR_PHY_STATUS_MORE = 27
248} netxen_niu_phy_register_t;
249 227
250/* 228/*
251 * PHY-Specific Status Register (reg 17). 229 * PHY-Specific Status Register (reg 17).
diff --git a/drivers/net/netxen/netxen_nic_init.c b/drivers/net/netxen/netxen_nic_init.c
index 0759c35f16ac..8893a973399a 100644
--- a/drivers/net/netxen/netxen_nic_init.c
+++ b/drivers/net/netxen/netxen_nic_init.c
@@ -108,42 +108,6 @@ static void crb_addr_transform_setup(void)
108 crb_addr_transform(I2C0); 108 crb_addr_transform(I2C0);
109} 109}
110 110
111int netxen_init_firmware(struct netxen_adapter *adapter)
112{
113 u32 state = 0, loops = 0, err = 0;
114
115 /* Window 1 call */
116 state = adapter->pci_read_normalize(adapter, CRB_CMDPEG_STATE);
117
118 if (state == PHAN_INITIALIZE_ACK)
119 return 0;
120
121 while (state != PHAN_INITIALIZE_COMPLETE && loops < 2000) {
122 msleep(1);
123 /* Window 1 call */
124 state = adapter->pci_read_normalize(adapter, CRB_CMDPEG_STATE);
125
126 loops++;
127 }
128 if (loops >= 2000) {
129 printk(KERN_ERR "Cmd Peg initialization not complete:%x.\n",
130 state);
131 err = -EIO;
132 return err;
133 }
134 /* Window 1 call */
135 adapter->pci_write_normalize(adapter,
136 CRB_NIC_CAPABILITIES_HOST, INTR_SCHEME_PERPORT);
137 adapter->pci_write_normalize(adapter,
138 CRB_NIC_MSI_MODE_HOST, MSI_MODE_MULTIFUNC);
139 adapter->pci_write_normalize(adapter,
140 CRB_MPORT_MODE, MPORT_MULTI_FUNCTION_MODE);
141 adapter->pci_write_normalize(adapter,
142 CRB_CMDPEG_STATE, PHAN_INITIALIZE_ACK);
143
144 return err;
145}
146
147void netxen_release_rx_buffers(struct netxen_adapter *adapter) 111void netxen_release_rx_buffers(struct netxen_adapter *adapter)
148{ 112{
149 struct netxen_recv_context *recv_ctx; 113 struct netxen_recv_context *recv_ctx;
@@ -173,9 +137,10 @@ void netxen_release_tx_buffers(struct netxen_adapter *adapter)
173 struct netxen_cmd_buffer *cmd_buf; 137 struct netxen_cmd_buffer *cmd_buf;
174 struct netxen_skb_frag *buffrag; 138 struct netxen_skb_frag *buffrag;
175 int i, j; 139 int i, j;
140 struct nx_host_tx_ring *tx_ring = &adapter->tx_ring;
176 141
177 cmd_buf = adapter->cmd_buf_arr; 142 cmd_buf = tx_ring->cmd_buf_arr;
178 for (i = 0; i < adapter->num_txd; i++) { 143 for (i = 0; i < tx_ring->num_desc; i++) {
179 buffrag = cmd_buf->frag_array; 144 buffrag = cmd_buf->frag_array;
180 if (buffrag->dma) { 145 if (buffrag->dma) {
181 pci_unmap_single(adapter->pdev, buffrag->dma, 146 pci_unmap_single(adapter->pdev, buffrag->dma,
@@ -203,6 +168,7 @@ void netxen_free_sw_resources(struct netxen_adapter *adapter)
203{ 168{
204 struct netxen_recv_context *recv_ctx; 169 struct netxen_recv_context *recv_ctx;
205 struct nx_host_rds_ring *rds_ring; 170 struct nx_host_rds_ring *rds_ring;
171 struct nx_host_tx_ring *tx_ring;
206 int ring; 172 int ring;
207 173
208 recv_ctx = &adapter->recv_ctx; 174 recv_ctx = &adapter->recv_ctx;
@@ -214,8 +180,9 @@ void netxen_free_sw_resources(struct netxen_adapter *adapter)
214 } 180 }
215 } 181 }
216 182
217 if (adapter->cmd_buf_arr) 183 tx_ring = &adapter->tx_ring;
218 vfree(adapter->cmd_buf_arr); 184 if (tx_ring->cmd_buf_arr)
185 vfree(tx_ring->cmd_buf_arr);
219 return; 186 return;
220} 187}
221 188
@@ -224,21 +191,24 @@ int netxen_alloc_sw_resources(struct netxen_adapter *adapter)
224 struct netxen_recv_context *recv_ctx; 191 struct netxen_recv_context *recv_ctx;
225 struct nx_host_rds_ring *rds_ring; 192 struct nx_host_rds_ring *rds_ring;
226 struct nx_host_sds_ring *sds_ring; 193 struct nx_host_sds_ring *sds_ring;
194 struct nx_host_tx_ring *tx_ring = &adapter->tx_ring;
227 struct netxen_rx_buffer *rx_buf; 195 struct netxen_rx_buffer *rx_buf;
228 int ring, i, num_rx_bufs; 196 int ring, i, num_rx_bufs;
229 197
230 struct netxen_cmd_buffer *cmd_buf_arr; 198 struct netxen_cmd_buffer *cmd_buf_arr;
231 struct net_device *netdev = adapter->netdev; 199 struct net_device *netdev = adapter->netdev;
200 struct pci_dev *pdev = adapter->pdev;
232 201
202 tx_ring->num_desc = adapter->num_txd;
233 cmd_buf_arr = 203 cmd_buf_arr =
234 (struct netxen_cmd_buffer *)vmalloc(TX_BUFF_RINGSIZE(adapter)); 204 (struct netxen_cmd_buffer *)vmalloc(TX_BUFF_RINGSIZE(tx_ring));
235 if (cmd_buf_arr == NULL) { 205 if (cmd_buf_arr == NULL) {
236 printk(KERN_ERR "%s: Failed to allocate cmd buffer ring\n", 206 dev_err(&pdev->dev, "%s: failed to allocate cmd buffer ring\n",
237 netdev->name); 207 netdev->name);
238 return -ENOMEM; 208 return -ENOMEM;
239 } 209 }
240 memset(cmd_buf_arr, 0, TX_BUFF_RINGSIZE(adapter)); 210 memset(cmd_buf_arr, 0, TX_BUFF_RINGSIZE(tx_ring));
241 adapter->cmd_buf_arr = cmd_buf_arr; 211 tx_ring->cmd_buf_arr = cmd_buf_arr;
242 212
243 recv_ctx = &adapter->recv_ctx; 213 recv_ctx = &adapter->recv_ctx;
244 for (ring = 0; ring < adapter->max_rds_rings; ring++) { 214 for (ring = 0; ring < adapter->max_rds_rings; ring++) {
@@ -307,8 +277,6 @@ int netxen_alloc_sw_resources(struct netxen_adapter *adapter)
307 for (ring = 0; ring < adapter->max_sds_rings; ring++) { 277 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
308 sds_ring = &recv_ctx->sds_rings[ring]; 278 sds_ring = &recv_ctx->sds_rings[ring];
309 sds_ring->irq = adapter->msix_entries[ring].vector; 279 sds_ring->irq = adapter->msix_entries[ring].vector;
310 sds_ring->clean_tx = (ring == 0);
311 sds_ring->post_rxd = (ring == 0);
312 sds_ring->adapter = adapter; 280 sds_ring->adapter = adapter;
313 sds_ring->num_desc = adapter->num_rxd; 281 sds_ring->num_desc = adapter->num_rxd;
314 282
@@ -400,8 +368,7 @@ static int rom_lock(struct netxen_adapter *adapter)
400 368
401 while (!done) { 369 while (!done) {
402 /* acquire semaphore2 from PCI HW block */ 370 /* acquire semaphore2 from PCI HW block */
403 netxen_nic_read_w0(adapter, NETXEN_PCIE_REG(PCIE_SEM2_LOCK), 371 done = NXRD32(adapter, NETXEN_PCIE_REG(PCIE_SEM2_LOCK));
404 &done);
405 if (done == 1) 372 if (done == 1)
406 break; 373 break;
407 if (timeout >= rom_lock_timeout) 374 if (timeout >= rom_lock_timeout)
@@ -418,7 +385,7 @@ static int rom_lock(struct netxen_adapter *adapter)
418 cpu_relax(); /*This a nop instr on i386 */ 385 cpu_relax(); /*This a nop instr on i386 */
419 } 386 }
420 } 387 }
421 netxen_nic_reg_write(adapter, NETXEN_ROM_LOCK_ID, ROM_LOCK_DRIVER); 388 NXWR32(adapter, NETXEN_ROM_LOCK_ID, ROM_LOCK_DRIVER);
422 return 0; 389 return 0;
423} 390}
424 391
@@ -430,7 +397,7 @@ static int netxen_wait_rom_done(struct netxen_adapter *adapter)
430 cond_resched(); 397 cond_resched();
431 398
432 while (done == 0) { 399 while (done == 0) {
433 done = netxen_nic_reg_read(adapter, NETXEN_ROMUSB_GLB_STATUS); 400 done = NXRD32(adapter, NETXEN_ROMUSB_GLB_STATUS);
434 done &= 2; 401 done &= 2;
435 timeout++; 402 timeout++;
436 if (timeout >= rom_max_timeout) { 403 if (timeout >= rom_max_timeout) {
@@ -443,30 +410,28 @@ static int netxen_wait_rom_done(struct netxen_adapter *adapter)
443 410
444static void netxen_rom_unlock(struct netxen_adapter *adapter) 411static void netxen_rom_unlock(struct netxen_adapter *adapter)
445{ 412{
446 u32 val;
447
448 /* release semaphore2 */ 413 /* release semaphore2 */
449 netxen_nic_read_w0(adapter, NETXEN_PCIE_REG(PCIE_SEM2_UNLOCK), &val); 414 NXRD32(adapter, NETXEN_PCIE_REG(PCIE_SEM2_UNLOCK));
450 415
451} 416}
452 417
453static int do_rom_fast_read(struct netxen_adapter *adapter, 418static int do_rom_fast_read(struct netxen_adapter *adapter,
454 int addr, int *valp) 419 int addr, int *valp)
455{ 420{
456 netxen_nic_reg_write(adapter, NETXEN_ROMUSB_ROM_ADDRESS, addr); 421 NXWR32(adapter, NETXEN_ROMUSB_ROM_ADDRESS, addr);
457 netxen_nic_reg_write(adapter, NETXEN_ROMUSB_ROM_DUMMY_BYTE_CNT, 0); 422 NXWR32(adapter, NETXEN_ROMUSB_ROM_DUMMY_BYTE_CNT, 0);
458 netxen_nic_reg_write(adapter, NETXEN_ROMUSB_ROM_ABYTE_CNT, 3); 423 NXWR32(adapter, NETXEN_ROMUSB_ROM_ABYTE_CNT, 3);
459 netxen_nic_reg_write(adapter, NETXEN_ROMUSB_ROM_INSTR_OPCODE, 0xb); 424 NXWR32(adapter, NETXEN_ROMUSB_ROM_INSTR_OPCODE, 0xb);
460 if (netxen_wait_rom_done(adapter)) { 425 if (netxen_wait_rom_done(adapter)) {
461 printk("Error waiting for rom done\n"); 426 printk("Error waiting for rom done\n");
462 return -EIO; 427 return -EIO;
463 } 428 }
464 /* reset abyte_cnt and dummy_byte_cnt */ 429 /* reset abyte_cnt and dummy_byte_cnt */
465 netxen_nic_reg_write(adapter, NETXEN_ROMUSB_ROM_ABYTE_CNT, 0); 430 NXWR32(adapter, NETXEN_ROMUSB_ROM_ABYTE_CNT, 0);
466 udelay(10); 431 udelay(10);
467 netxen_nic_reg_write(adapter, NETXEN_ROMUSB_ROM_DUMMY_BYTE_CNT, 0); 432 NXWR32(adapter, NETXEN_ROMUSB_ROM_DUMMY_BYTE_CNT, 0);
468 433
469 *valp = netxen_nic_reg_read(adapter, NETXEN_ROMUSB_ROM_RDATA); 434 *valp = NXRD32(adapter, NETXEN_ROMUSB_ROM_RDATA);
470 return 0; 435 return 0;
471} 436}
472 437
@@ -530,8 +495,7 @@ int netxen_pinit_from_rom(struct netxen_adapter *adapter, int verbose)
530 495
531 /* resetall */ 496 /* resetall */
532 rom_lock(adapter); 497 rom_lock(adapter);
533 netxen_crb_writelit_adapter(adapter, NETXEN_ROMUSB_GLB_SW_RESET, 498 NXWR32(adapter, NETXEN_ROMUSB_GLB_SW_RESET, 0xffffffff);
534 0xffffffff);
535 netxen_rom_unlock(adapter); 499 netxen_rom_unlock(adapter);
536 500
537 if (verbose) { 501 if (verbose) {
@@ -655,7 +619,7 @@ int netxen_pinit_from_rom(struct netxen_adapter *adapter, int verbose)
655 } 619 }
656 } 620 }
657 621
658 adapter->hw_write_wx(adapter, off, &buf[i].data, 4); 622 NXWR32(adapter, off, buf[i].data);
659 623
660 msleep(init_delay); 624 msleep(init_delay);
661 } 625 }
@@ -665,33 +629,31 @@ int netxen_pinit_from_rom(struct netxen_adapter *adapter, int verbose)
665 629
666 /* unreset_net_cache */ 630 /* unreset_net_cache */
667 if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) { 631 if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) {
668 adapter->hw_read_wx(adapter, 632 val = NXRD32(adapter, NETXEN_ROMUSB_GLB_SW_RESET);
669 NETXEN_ROMUSB_GLB_SW_RESET, &val, 4); 633 NXWR32(adapter, NETXEN_ROMUSB_GLB_SW_RESET, (val & 0xffffff0f));
670 netxen_crb_writelit_adapter(adapter,
671 NETXEN_ROMUSB_GLB_SW_RESET, (val & 0xffffff0f));
672 } 634 }
673 635
674 /* p2dn replyCount */ 636 /* p2dn replyCount */
675 netxen_crb_writelit_adapter(adapter, NETXEN_CRB_PEG_NET_D + 0xec, 0x1e); 637 NXWR32(adapter, NETXEN_CRB_PEG_NET_D + 0xec, 0x1e);
676 /* disable_peg_cache 0 */ 638 /* disable_peg_cache 0 */
677 netxen_crb_writelit_adapter(adapter, NETXEN_CRB_PEG_NET_D + 0x4c, 8); 639 NXWR32(adapter, NETXEN_CRB_PEG_NET_D + 0x4c, 8);
678 /* disable_peg_cache 1 */ 640 /* disable_peg_cache 1 */
679 netxen_crb_writelit_adapter(adapter, NETXEN_CRB_PEG_NET_I + 0x4c, 8); 641 NXWR32(adapter, NETXEN_CRB_PEG_NET_I + 0x4c, 8);
680 642
681 /* peg_clr_all */ 643 /* peg_clr_all */
682 644
683 /* peg_clr 0 */ 645 /* peg_clr 0 */
684 netxen_crb_writelit_adapter(adapter, NETXEN_CRB_PEG_NET_0 + 0x8, 0); 646 NXWR32(adapter, NETXEN_CRB_PEG_NET_0 + 0x8, 0);
685 netxen_crb_writelit_adapter(adapter, NETXEN_CRB_PEG_NET_0 + 0xc, 0); 647 NXWR32(adapter, NETXEN_CRB_PEG_NET_0 + 0xc, 0);
686 /* peg_clr 1 */ 648 /* peg_clr 1 */
687 netxen_crb_writelit_adapter(adapter, NETXEN_CRB_PEG_NET_1 + 0x8, 0); 649 NXWR32(adapter, NETXEN_CRB_PEG_NET_1 + 0x8, 0);
688 netxen_crb_writelit_adapter(adapter, NETXEN_CRB_PEG_NET_1 + 0xc, 0); 650 NXWR32(adapter, NETXEN_CRB_PEG_NET_1 + 0xc, 0);
689 /* peg_clr 2 */ 651 /* peg_clr 2 */
690 netxen_crb_writelit_adapter(adapter, NETXEN_CRB_PEG_NET_2 + 0x8, 0); 652 NXWR32(adapter, NETXEN_CRB_PEG_NET_2 + 0x8, 0);
691 netxen_crb_writelit_adapter(adapter, NETXEN_CRB_PEG_NET_2 + 0xc, 0); 653 NXWR32(adapter, NETXEN_CRB_PEG_NET_2 + 0xc, 0);
692 /* peg_clr 3 */ 654 /* peg_clr 3 */
693 netxen_crb_writelit_adapter(adapter, NETXEN_CRB_PEG_NET_3 + 0x8, 0); 655 NXWR32(adapter, NETXEN_CRB_PEG_NET_3 + 0x8, 0);
694 netxen_crb_writelit_adapter(adapter, NETXEN_CRB_PEG_NET_3 + 0xc, 0); 656 NXWR32(adapter, NETXEN_CRB_PEG_NET_3 + 0xc, 0);
695 return 0; 657 return 0;
696} 658}
697 659
@@ -715,12 +677,12 @@ int netxen_initialize_adapter_offload(struct netxen_adapter *adapter)
715 hi = (addr >> 32) & 0xffffffff; 677 hi = (addr >> 32) & 0xffffffff;
716 lo = addr & 0xffffffff; 678 lo = addr & 0xffffffff;
717 679
718 adapter->pci_write_normalize(adapter, CRB_HOST_DUMMY_BUF_ADDR_HI, hi); 680 NXWR32(adapter, CRB_HOST_DUMMY_BUF_ADDR_HI, hi);
719 adapter->pci_write_normalize(adapter, CRB_HOST_DUMMY_BUF_ADDR_LO, lo); 681 NXWR32(adapter, CRB_HOST_DUMMY_BUF_ADDR_LO, lo);
720 682
721 if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) { 683 if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) {
722 uint32_t temp = 0; 684 uint32_t temp = 0;
723 adapter->hw_write_wx(adapter, CRB_HOST_DUMMY_BUF, &temp, 4); 685 NXWR32(adapter, CRB_HOST_DUMMY_BUF, temp);
724 } 686 }
725 687
726 return 0; 688 return 0;
@@ -762,8 +724,7 @@ int netxen_phantom_init(struct netxen_adapter *adapter, int pegtune_val)
762 724
763 if (!pegtune_val) { 725 if (!pegtune_val) {
764 do { 726 do {
765 val = adapter->pci_read_normalize(adapter, 727 val = NXRD32(adapter, CRB_CMDPEG_STATE);
766 CRB_CMDPEG_STATE);
767 728
768 if (val == PHAN_INITIALIZE_COMPLETE || 729 if (val == PHAN_INITIALIZE_COMPLETE ||
769 val == PHAN_INITIALIZE_ACK) 730 val == PHAN_INITIALIZE_ACK)
@@ -774,7 +735,7 @@ int netxen_phantom_init(struct netxen_adapter *adapter, int pegtune_val)
774 } while (--retries); 735 } while (--retries);
775 736
776 if (!retries) { 737 if (!retries) {
777 pegtune_val = adapter->pci_read_normalize(adapter, 738 pegtune_val = NXRD32(adapter,
778 NETXEN_ROMUSB_GLB_PEGTUNE_DONE); 739 NETXEN_ROMUSB_GLB_PEGTUNE_DONE);
779 printk(KERN_WARNING "netxen_phantom_init: init failed, " 740 printk(KERN_WARNING "netxen_phantom_init: init failed, "
780 "pegtune_val=%x\n", pegtune_val); 741 "pegtune_val=%x\n", pegtune_val);
@@ -785,13 +746,14 @@ int netxen_phantom_init(struct netxen_adapter *adapter, int pegtune_val)
785 return 0; 746 return 0;
786} 747}
787 748
788int netxen_receive_peg_ready(struct netxen_adapter *adapter) 749static int
750netxen_receive_peg_ready(struct netxen_adapter *adapter)
789{ 751{
790 u32 val = 0; 752 u32 val = 0;
791 int retries = 2000; 753 int retries = 2000;
792 754
793 do { 755 do {
794 val = adapter->pci_read_normalize(adapter, CRB_RCVPEG_STATE); 756 val = NXRD32(adapter, CRB_RCVPEG_STATE);
795 757
796 if (val == PHAN_PEG_RCV_INITIALIZED) 758 if (val == PHAN_PEG_RCV_INITIALIZED)
797 return 0; 759 return 0;
@@ -809,6 +771,93 @@ int netxen_receive_peg_ready(struct netxen_adapter *adapter)
809 return 0; 771 return 0;
810} 772}
811 773
774int netxen_init_firmware(struct netxen_adapter *adapter)
775{
776 int err;
777
778 err = netxen_receive_peg_ready(adapter);
779 if (err)
780 return err;
781
782 NXWR32(adapter, CRB_NIC_CAPABILITIES_HOST, INTR_SCHEME_PERPORT);
783 NXWR32(adapter, CRB_NIC_MSI_MODE_HOST, MSI_MODE_MULTIFUNC);
784 NXWR32(adapter, CRB_MPORT_MODE, MPORT_MULTI_FUNCTION_MODE);
785 NXWR32(adapter, CRB_CMDPEG_STATE, PHAN_INITIALIZE_ACK);
786
787 if (adapter->fw_version >= NETXEN_VERSION_CODE(4, 0, 222)) {
788 adapter->capabilities = NXRD32(adapter, CRB_FW_CAPABILITIES_1);
789 }
790
791 return err;
792}
793
794static void
795netxen_handle_linkevent(struct netxen_adapter *adapter, nx_fw_msg_t *msg)
796{
797 u32 cable_OUI;
798 u16 cable_len;
799 u16 link_speed;
800 u8 link_status, module, duplex, autoneg;
801 struct net_device *netdev = adapter->netdev;
802
803 adapter->has_link_events = 1;
804
805 cable_OUI = msg->body[1] & 0xffffffff;
806 cable_len = (msg->body[1] >> 32) & 0xffff;
807 link_speed = (msg->body[1] >> 48) & 0xffff;
808
809 link_status = msg->body[2] & 0xff;
810 duplex = (msg->body[2] >> 16) & 0xff;
811 autoneg = (msg->body[2] >> 24) & 0xff;
812
813 module = (msg->body[2] >> 8) & 0xff;
814 if (module == LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLE) {
815 printk(KERN_INFO "%s: unsupported cable: OUI 0x%x, length %d\n",
816 netdev->name, cable_OUI, cable_len);
817 } else if (module == LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLELEN) {
818 printk(KERN_INFO "%s: unsupported cable length %d\n",
819 netdev->name, cable_len);
820 }
821
822 netxen_advert_link_change(adapter, link_status);
823
824 /* update link parameters */
825 if (duplex == LINKEVENT_FULL_DUPLEX)
826 adapter->link_duplex = DUPLEX_FULL;
827 else
828 adapter->link_duplex = DUPLEX_HALF;
829 adapter->module_type = module;
830 adapter->link_autoneg = autoneg;
831 adapter->link_speed = link_speed;
832}
833
834static void
835netxen_handle_fw_message(int desc_cnt, int index,
836 struct nx_host_sds_ring *sds_ring)
837{
838 nx_fw_msg_t msg;
839 struct status_desc *desc;
840 int i = 0, opcode;
841
842 while (desc_cnt > 0 && i < 8) {
843 desc = &sds_ring->desc_head[index];
844 msg.words[i++] = le64_to_cpu(desc->status_desc_data[0]);
845 msg.words[i++] = le64_to_cpu(desc->status_desc_data[1]);
846
847 index = get_next_index(index, sds_ring->num_desc);
848 desc_cnt--;
849 }
850
851 opcode = netxen_get_nic_msg_opcode(msg.body[0]);
852 switch (opcode) {
853 case NX_NIC_C2H_OPCODE_GET_LINKEVENT_RESPONSE:
854 netxen_handle_linkevent(sds_ring->adapter, &msg);
855 break;
856 default:
857 break;
858 }
859}
860
812static int 861static int
813netxen_alloc_rx_skb(struct netxen_adapter *adapter, 862netxen_alloc_rx_skb(struct netxen_adapter *adapter,
814 struct nx_host_rds_ring *rds_ring, 863 struct nx_host_rds_ring *rds_ring,
@@ -874,7 +923,8 @@ no_skb:
874 923
875static struct netxen_rx_buffer * 924static struct netxen_rx_buffer *
876netxen_process_rcv(struct netxen_adapter *adapter, 925netxen_process_rcv(struct netxen_adapter *adapter,
877 int ring, int index, int length, int cksum, int pkt_offset) 926 int ring, int index, int length, int cksum, int pkt_offset,
927 struct nx_host_sds_ring *sds_ring)
878{ 928{
879 struct net_device *netdev = adapter->netdev; 929 struct net_device *netdev = adapter->netdev;
880 struct netxen_recv_context *recv_ctx = &adapter->recv_ctx; 930 struct netxen_recv_context *recv_ctx = &adapter->recv_ctx;
@@ -902,7 +952,7 @@ netxen_process_rcv(struct netxen_adapter *adapter,
902 952
903 skb->protocol = eth_type_trans(skb, netdev); 953 skb->protocol = eth_type_trans(skb, netdev);
904 954
905 netif_receive_skb(skb); 955 napi_gro_receive(&sds_ring->napi, skb);
906 956
907 adapter->stats.no_rcv++; 957 adapter->stats.no_rcv++;
908 adapter->stats.rxbytes += length; 958 adapter->stats.rxbytes += length;
@@ -927,35 +977,53 @@ netxen_process_rcv_ring(struct nx_host_sds_ring *sds_ring, int max)
927 977
928 int count = 0; 978 int count = 0;
929 u64 sts_data; 979 u64 sts_data;
930 int opcode, ring, index, length, cksum, pkt_offset; 980 int opcode, ring, index, length, cksum, pkt_offset, desc_cnt;
931 981
932 while (count < max) { 982 while (count < max) {
933 desc = &sds_ring->desc_head[consumer]; 983 desc = &sds_ring->desc_head[consumer];
934 sts_data = le64_to_cpu(desc->status_desc_data); 984 sts_data = le64_to_cpu(desc->status_desc_data[0]);
935 985
936 if (!(sts_data & STATUS_OWNER_HOST)) 986 if (!(sts_data & STATUS_OWNER_HOST))
937 break; 987 break;
938 988
989 desc_cnt = netxen_get_sts_desc_cnt(sts_data);
939 ring = netxen_get_sts_type(sts_data); 990 ring = netxen_get_sts_type(sts_data);
991
940 if (ring > RCV_RING_JUMBO) 992 if (ring > RCV_RING_JUMBO)
941 continue; 993 goto skip;
942 994
943 opcode = netxen_get_sts_opcode(sts_data); 995 opcode = netxen_get_sts_opcode(sts_data);
944 996
997 switch (opcode) {
998 case NETXEN_NIC_RXPKT_DESC:
999 case NETXEN_OLD_RXPKT_DESC:
1000 break;
1001 case NETXEN_NIC_RESPONSE_DESC:
1002 netxen_handle_fw_message(desc_cnt, consumer, sds_ring);
1003 default:
1004 goto skip;
1005 }
1006
1007 WARN_ON(desc_cnt > 1);
1008
945 index = netxen_get_sts_refhandle(sts_data); 1009 index = netxen_get_sts_refhandle(sts_data);
946 length = netxen_get_sts_totallength(sts_data); 1010 length = netxen_get_sts_totallength(sts_data);
947 cksum = netxen_get_sts_status(sts_data); 1011 cksum = netxen_get_sts_status(sts_data);
948 pkt_offset = netxen_get_sts_pkt_offset(sts_data); 1012 pkt_offset = netxen_get_sts_pkt_offset(sts_data);
949 1013
950 rxbuf = netxen_process_rcv(adapter, ring, index, 1014 rxbuf = netxen_process_rcv(adapter, ring, index,
951 length, cksum, pkt_offset); 1015 length, cksum, pkt_offset, sds_ring);
952 1016
953 if (rxbuf) 1017 if (rxbuf)
954 list_add_tail(&rxbuf->list, &sds_ring->free_list[ring]); 1018 list_add_tail(&rxbuf->list, &sds_ring->free_list[ring]);
955 1019
956 desc->status_desc_data = cpu_to_le64(STATUS_OWNER_PHANTOM); 1020skip:
957 1021 for (; desc_cnt > 0; desc_cnt--) {
958 consumer = get_next_index(consumer, sds_ring->num_desc); 1022 desc = &sds_ring->desc_head[consumer];
1023 desc->status_desc_data[0] =
1024 cpu_to_le64(STATUS_OWNER_PHANTOM);
1025 consumer = get_next_index(consumer, sds_ring->num_desc);
1026 }
959 count++; 1027 count++;
960 } 1028 }
961 1029
@@ -980,8 +1048,7 @@ netxen_process_rcv_ring(struct nx_host_sds_ring *sds_ring, int max)
980 1048
981 if (count) { 1049 if (count) {
982 sds_ring->consumer = consumer; 1050 sds_ring->consumer = consumer;
983 adapter->pci_write_normalize(adapter, 1051 NXWR32(adapter, sds_ring->crb_sts_consumer, consumer);
984 sds_ring->crb_sts_consumer, consumer);
985 } 1052 }
986 1053
987 return count; 1054 return count;
@@ -990,23 +1057,24 @@ netxen_process_rcv_ring(struct nx_host_sds_ring *sds_ring, int max)
990/* Process Command status ring */ 1057/* Process Command status ring */
991int netxen_process_cmd_ring(struct netxen_adapter *adapter) 1058int netxen_process_cmd_ring(struct netxen_adapter *adapter)
992{ 1059{
993 u32 last_consumer, consumer; 1060 u32 sw_consumer, hw_consumer;
994 int count = 0, i; 1061 int count = 0, i;
995 struct netxen_cmd_buffer *buffer; 1062 struct netxen_cmd_buffer *buffer;
996 struct pci_dev *pdev = adapter->pdev; 1063 struct pci_dev *pdev = adapter->pdev;
997 struct net_device *netdev = adapter->netdev; 1064 struct net_device *netdev = adapter->netdev;
998 struct netxen_skb_frag *frag; 1065 struct netxen_skb_frag *frag;
999 int done = 0; 1066 int done = 0;
1067 struct nx_host_tx_ring *tx_ring = &adapter->tx_ring;
1000 1068
1001 if (!spin_trylock(&adapter->tx_clean_lock)) 1069 if (!spin_trylock(&adapter->tx_clean_lock))
1002 return 1; 1070 return 1;
1003 1071
1004 last_consumer = adapter->last_cmd_consumer; 1072 sw_consumer = tx_ring->sw_consumer;
1005 barrier(); /* cmd_consumer can change underneath */ 1073 barrier(); /* hw_consumer can change underneath */
1006 consumer = le32_to_cpu(*(adapter->cmd_consumer)); 1074 hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer));
1007 1075
1008 while (last_consumer != consumer) { 1076 while (sw_consumer != hw_consumer) {
1009 buffer = &adapter->cmd_buf_arr[last_consumer]; 1077 buffer = &tx_ring->cmd_buf_arr[sw_consumer];
1010 if (buffer->skb) { 1078 if (buffer->skb) {
1011 frag = &buffer->frag_array[0]; 1079 frag = &buffer->frag_array[0];
1012 pci_unmap_single(pdev, frag->dma, frag->length, 1080 pci_unmap_single(pdev, frag->dma, frag->length,
@@ -1024,14 +1092,13 @@ int netxen_process_cmd_ring(struct netxen_adapter *adapter)
1024 buffer->skb = NULL; 1092 buffer->skb = NULL;
1025 } 1093 }
1026 1094
1027 last_consumer = get_next_index(last_consumer, 1095 sw_consumer = get_next_index(sw_consumer, tx_ring->num_desc);
1028 adapter->num_txd);
1029 if (++count >= MAX_STATUS_HANDLE) 1096 if (++count >= MAX_STATUS_HANDLE)
1030 break; 1097 break;
1031 } 1098 }
1032 1099
1033 if (count) { 1100 if (count) {
1034 adapter->last_cmd_consumer = last_consumer; 1101 tx_ring->sw_consumer = sw_consumer;
1035 smp_mb(); 1102 smp_mb();
1036 if (netif_queue_stopped(netdev) && netif_running(netdev)) { 1103 if (netif_queue_stopped(netdev) && netif_running(netdev)) {
1037 netif_tx_lock(netdev); 1104 netif_tx_lock(netdev);
@@ -1053,9 +1120,9 @@ int netxen_process_cmd_ring(struct netxen_adapter *adapter)
1053 * There is still a possible race condition and the host could miss an 1120 * There is still a possible race condition and the host could miss an
1054 * interrupt. The card has to take care of this. 1121 * interrupt. The card has to take care of this.
1055 */ 1122 */
1056 barrier(); /* cmd_consumer can change underneath */ 1123 barrier(); /* hw_consumer can change underneath */
1057 consumer = le32_to_cpu(*(adapter->cmd_consumer)); 1124 hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer));
1058 done = (last_consumer == consumer); 1125 done = (sw_consumer == hw_consumer);
1059 spin_unlock(&adapter->tx_clean_lock); 1126 spin_unlock(&adapter->tx_clean_lock);
1060 1127
1061 return (done); 1128 return (done);
@@ -1099,8 +1166,7 @@ netxen_post_rx_buffers(struct netxen_adapter *adapter, u32 ringid,
1099 1166
1100 if (count) { 1167 if (count) {
1101 rds_ring->producer = producer; 1168 rds_ring->producer = producer;
1102 adapter->pci_write_normalize(adapter, 1169 NXWR32(adapter, rds_ring->crb_rcv_producer,
1103 rds_ring->crb_rcv_producer,
1104 (producer-1) & (rds_ring->num_desc-1)); 1170 (producer-1) & (rds_ring->num_desc-1));
1105 1171
1106 if (adapter->fw_major < 4) { 1172 if (adapter->fw_major < 4) {
@@ -1160,8 +1226,7 @@ netxen_post_rx_buffers_nodb(struct netxen_adapter *adapter,
1160 1226
1161 if (count) { 1227 if (count) {
1162 rds_ring->producer = producer; 1228 rds_ring->producer = producer;
1163 adapter->pci_write_normalize(adapter, 1229 NXWR32(adapter, rds_ring->crb_rcv_producer,
1164 rds_ring->crb_rcv_producer,
1165 (producer - 1) & (rds_ring->num_desc - 1)); 1230 (producer - 1) & (rds_ring->num_desc - 1));
1166 wmb(); 1231 wmb();
1167 } 1232 }
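Note on the netxen_nic_init.c changes above: the transmit bookkeeping that previously hung directly off struct netxen_adapter (cmd_buf_arr, num_txd, last_cmd_consumer, cmd_consumer and the CRB producer/consumer offsets) is folded into a single nx_host_tx_ring. The structure itself is not defined in this section; based only on the fields referenced here and in the netxen_nic_main.c hunks below, its shape is roughly the sketch that follows. Field names come from the diff, while the exact types and layout are an assumption.

/*
 * Rough shape of the consolidated TX ring state, inferred from the
 * fields this patch references; the real definition is elsewhere.
 */
struct nx_host_tx_ring {
	u32 num_desc;		/* ring size, seeded from adapter->num_txd */
	u32 sw_consumer;	/* host-side consumer index */
	__le32 *hw_consumer;	/* consumer index written back by the firmware */
	u32 crb_cmd_producer;	/* CRB offset used to post the producer index */
	u32 crb_cmd_consumer;	/* CRB offset of the consumer register */
	struct netxen_cmd_buffer *cmd_buf_arr;	/* per-descriptor skb/frag tracking */
};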
diff --git a/drivers/net/netxen/netxen_nic_main.c b/drivers/net/netxen/netxen_nic_main.c
index aef77289bd34..e877eefdfeb0 100644
--- a/drivers/net/netxen/netxen_nic_main.c
+++ b/drivers/net/netxen/netxen_nic_main.c
@@ -29,7 +29,7 @@
29 */ 29 */
30 30
31#include <linux/vmalloc.h> 31#include <linux/vmalloc.h>
32#include <linux/highmem.h> 32#include <linux/interrupt.h>
33#include "netxen_nic_hw.h" 33#include "netxen_nic_hw.h"
34 34
35#include "netxen_nic.h" 35#include "netxen_nic.h"
@@ -107,10 +107,9 @@ static uint32_t crb_cmd_producer[4] = {
107 107
108void 108void
109netxen_nic_update_cmd_producer(struct netxen_adapter *adapter, 109netxen_nic_update_cmd_producer(struct netxen_adapter *adapter,
110 uint32_t crb_producer) 110 struct nx_host_tx_ring *tx_ring, u32 producer)
111{ 111{
112 adapter->pci_write_normalize(adapter, 112 NXWR32(adapter, tx_ring->crb_cmd_producer, producer);
113 adapter->crb_addr_cmd_producer, crb_producer);
114} 113}
115 114
116static uint32_t crb_cmd_consumer[4] = { 115static uint32_t crb_cmd_consumer[4] = {
@@ -120,10 +119,9 @@ static uint32_t crb_cmd_consumer[4] = {
120 119
121static inline void 120static inline void
122netxen_nic_update_cmd_consumer(struct netxen_adapter *adapter, 121netxen_nic_update_cmd_consumer(struct netxen_adapter *adapter,
123 u32 crb_consumer) 122 struct nx_host_tx_ring *tx_ring, u32 consumer)
124{ 123{
125 adapter->pci_write_normalize(adapter, 124 NXWR32(adapter, tx_ring->crb_cmd_consumer, consumer);
126 adapter->crb_addr_cmd_consumer, crb_consumer);
127} 125}
128 126
129static uint32_t msi_tgt_status[8] = { 127static uint32_t msi_tgt_status[8] = {
@@ -139,37 +137,60 @@ static inline void netxen_nic_disable_int(struct nx_host_sds_ring *sds_ring)
139{ 137{
140 struct netxen_adapter *adapter = sds_ring->adapter; 138 struct netxen_adapter *adapter = sds_ring->adapter;
141 139
142 adapter->pci_write_normalize(adapter, sds_ring->crb_intr_mask, 0); 140 NXWR32(adapter, sds_ring->crb_intr_mask, 0);
143} 141}
144 142
145static inline void netxen_nic_enable_int(struct nx_host_sds_ring *sds_ring) 143static inline void netxen_nic_enable_int(struct nx_host_sds_ring *sds_ring)
146{ 144{
147 struct netxen_adapter *adapter = sds_ring->adapter; 145 struct netxen_adapter *adapter = sds_ring->adapter;
148 146
149 adapter->pci_write_normalize(adapter, sds_ring->crb_intr_mask, 0x1); 147 NXWR32(adapter, sds_ring->crb_intr_mask, 0x1);
150 148
151 if (!NETXEN_IS_MSI_FAMILY(adapter)) 149 if (!NETXEN_IS_MSI_FAMILY(adapter))
152 adapter->pci_write_immediate(adapter, 150 adapter->pci_write_immediate(adapter,
153 adapter->legacy_intr.tgt_mask_reg, 0xfbff); 151 adapter->legacy_intr.tgt_mask_reg, 0xfbff);
154} 152}
155 153
154static int
155netxen_alloc_sds_rings(struct netxen_recv_context *recv_ctx, int count)
156{
157 int size = sizeof(struct nx_host_sds_ring) * count;
158
159 recv_ctx->sds_rings = kzalloc(size, GFP_KERNEL);
160
161 return (recv_ctx->sds_rings == NULL);
162}
163
156static void 164static void
165netxen_free_sds_rings(struct netxen_recv_context *recv_ctx)
166{
167 if (recv_ctx->sds_rings != NULL)
168 kfree(recv_ctx->sds_rings);
169}
170
171static int
157netxen_napi_add(struct netxen_adapter *adapter, struct net_device *netdev) 172netxen_napi_add(struct netxen_adapter *adapter, struct net_device *netdev)
158{ 173{
159 int ring; 174 int ring;
160 struct nx_host_sds_ring *sds_ring; 175 struct nx_host_sds_ring *sds_ring;
161 struct netxen_recv_context *recv_ctx = &adapter->recv_ctx; 176 struct netxen_recv_context *recv_ctx = &adapter->recv_ctx;
162 177
163 if (adapter->flags & NETXEN_NIC_MSIX_ENABLED) 178 if ((adapter->flags & NETXEN_NIC_MSIX_ENABLED) &&
179 adapter->rss_supported)
164 adapter->max_sds_rings = (num_online_cpus() >= 4) ? 4 : 2; 180 adapter->max_sds_rings = (num_online_cpus() >= 4) ? 4 : 2;
165 else 181 else
166 adapter->max_sds_rings = 1; 182 adapter->max_sds_rings = 1;
167 183
184 if (netxen_alloc_sds_rings(recv_ctx, adapter->max_sds_rings))
185 return 1;
186
168 for (ring = 0; ring < adapter->max_sds_rings; ring++) { 187 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
169 sds_ring = &recv_ctx->sds_rings[ring]; 188 sds_ring = &recv_ctx->sds_rings[ring];
170 netif_napi_add(netdev, &sds_ring->napi, 189 netif_napi_add(netdev, &sds_ring->napi,
171 netxen_nic_poll, NETXEN_NETDEV_WEIGHT); 190 netxen_nic_poll, NETXEN_NETDEV_WEIGHT);
172 } 191 }
192
193 return 0;
173} 194}
174 195
175static void 196static void
@@ -240,7 +261,7 @@ nx_update_dma_mask(struct netxen_adapter *adapter)
240 261
241 change = 0; 262 change = 0;
242 263
243 shift = netxen_nic_reg_read(adapter, CRB_DMA_SHIFT); 264 shift = NXRD32(adapter, CRB_DMA_SHIFT);
244 if (shift >= 32) 265 if (shift >= 32)
245 return 0; 266 return 0;
246 267
@@ -268,10 +289,22 @@ static void netxen_check_options(struct netxen_adapter *adapter)
268 else if (adapter->ahw.port_type == NETXEN_NIC_GBE) 289 else if (adapter->ahw.port_type == NETXEN_NIC_GBE)
269 adapter->num_rxd = MAX_RCV_DESCRIPTORS_1G; 290 adapter->num_rxd = MAX_RCV_DESCRIPTORS_1G;
270 291
271 if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) 292 adapter->msix_supported = 0;
293 if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) {
272 adapter->msix_supported = !!use_msi_x; 294 adapter->msix_supported = !!use_msi_x;
273 else 295 adapter->rss_supported = !!use_msi_x;
274 adapter->msix_supported = 0; 296 } else if (adapter->fw_version >= NETXEN_VERSION_CODE(3, 4, 336)) {
297 switch (adapter->ahw.board_type) {
298 case NETXEN_BRDTYPE_P2_SB31_10G:
299 case NETXEN_BRDTYPE_P2_SB31_10G_CX4:
300 case NETXEN_BRDTYPE_P2_SB31_10G_HMEZ:
301 adapter->msix_supported = !!use_msi_x;
302 adapter->rss_supported = !!use_msi_x;
303 break;
304 default:
305 break;
306 }
307 }
275 308
276 adapter->num_txd = MAX_CMD_DESCRIPTORS_HOST; 309 adapter->num_txd = MAX_CMD_DESCRIPTORS_HOST;
277 adapter->num_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS; 310 adapter->num_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS;
@@ -287,43 +320,34 @@ netxen_check_hw_init(struct netxen_adapter *adapter, int first_boot)
287 320
288 if (first_boot == 0x55555555) { 321 if (first_boot == 0x55555555) {
289 /* This is the first boot after power up */ 322 /* This is the first boot after power up */
290 adapter->pci_write_normalize(adapter, 323 NXWR32(adapter, NETXEN_CAM_RAM(0x1fc), NETXEN_BDINFO_MAGIC);
291 NETXEN_CAM_RAM(0x1fc), NETXEN_BDINFO_MAGIC);
292 324
293 if (!NX_IS_REVISION_P2(adapter->ahw.revision_id)) 325 if (!NX_IS_REVISION_P2(adapter->ahw.revision_id))
294 return 0; 326 return 0;
295 327
296 /* PCI bus master workaround */ 328 /* PCI bus master workaround */
297 adapter->hw_read_wx(adapter, 329 first_boot = NXRD32(adapter, NETXEN_PCIE_REG(0x4));
298 NETXEN_PCIE_REG(0x4), &first_boot, 4);
299 if (!(first_boot & 0x4)) { 330 if (!(first_boot & 0x4)) {
300 first_boot |= 0x4; 331 first_boot |= 0x4;
301 adapter->hw_write_wx(adapter, 332 NXWR32(adapter, NETXEN_PCIE_REG(0x4), first_boot);
302 NETXEN_PCIE_REG(0x4), &first_boot, 4); 333 first_boot = NXRD32(adapter, NETXEN_PCIE_REG(0x4));
303 adapter->hw_read_wx(adapter,
304 NETXEN_PCIE_REG(0x4), &first_boot, 4);
305 } 334 }
306 335
307 /* This is the first boot after power up */ 336 /* This is the first boot after power up */
308 adapter->hw_read_wx(adapter, 337 first_boot = NXRD32(adapter, NETXEN_ROMUSB_GLB_SW_RESET);
309 NETXEN_ROMUSB_GLB_SW_RESET, &first_boot, 4);
310 if (first_boot != 0x80000f) { 338 if (first_boot != 0x80000f) {
311 /* clear the register for future unloads/loads */ 339 /* clear the register for future unloads/loads */
312 adapter->pci_write_normalize(adapter, 340 NXWR32(adapter, NETXEN_CAM_RAM(0x1fc), 0);
313 NETXEN_CAM_RAM(0x1fc), 0);
314 return -EIO; 341 return -EIO;
315 } 342 }
316 343
317 /* Start P2 boot loader */ 344 /* Start P2 boot loader */
318 val = adapter->pci_read_normalize(adapter, 345 val = NXRD32(adapter, NETXEN_ROMUSB_GLB_PEGTUNE_DONE);
319 NETXEN_ROMUSB_GLB_PEGTUNE_DONE); 346 NXWR32(adapter, NETXEN_ROMUSB_GLB_PEGTUNE_DONE, val | 0x1);
320 adapter->pci_write_normalize(adapter,
321 NETXEN_ROMUSB_GLB_PEGTUNE_DONE, val | 0x1);
322 timeout = 0; 347 timeout = 0;
323 do { 348 do {
324 msleep(1); 349 msleep(1);
325 val = adapter->pci_read_normalize(adapter, 350 val = NXRD32(adapter, NETXEN_CAM_RAM(0x1fc));
326 NETXEN_CAM_RAM(0x1fc));
327 351
328 if (++timeout > 5000) 352 if (++timeout > 5000)
329 return -EIO; 353 return -EIO;
@@ -342,24 +366,19 @@ static void netxen_set_port_mode(struct netxen_adapter *adapter)
342 (val == NETXEN_BRDTYPE_P3_XG_LOM)) { 366 (val == NETXEN_BRDTYPE_P3_XG_LOM)) {
343 if (port_mode == NETXEN_PORT_MODE_802_3_AP) { 367 if (port_mode == NETXEN_PORT_MODE_802_3_AP) {
344 data = NETXEN_PORT_MODE_802_3_AP; 368 data = NETXEN_PORT_MODE_802_3_AP;
345 adapter->hw_write_wx(adapter, 369 NXWR32(adapter, NETXEN_PORT_MODE_ADDR, data);
346 NETXEN_PORT_MODE_ADDR, &data, 4);
347 } else if (port_mode == NETXEN_PORT_MODE_XG) { 370 } else if (port_mode == NETXEN_PORT_MODE_XG) {
348 data = NETXEN_PORT_MODE_XG; 371 data = NETXEN_PORT_MODE_XG;
349 adapter->hw_write_wx(adapter, 372 NXWR32(adapter, NETXEN_PORT_MODE_ADDR, data);
350 NETXEN_PORT_MODE_ADDR, &data, 4);
351 } else if (port_mode == NETXEN_PORT_MODE_AUTO_NEG_1G) { 373 } else if (port_mode == NETXEN_PORT_MODE_AUTO_NEG_1G) {
352 data = NETXEN_PORT_MODE_AUTO_NEG_1G; 374 data = NETXEN_PORT_MODE_AUTO_NEG_1G;
353 adapter->hw_write_wx(adapter, 375 NXWR32(adapter, NETXEN_PORT_MODE_ADDR, data);
354 NETXEN_PORT_MODE_ADDR, &data, 4);
355 } else if (port_mode == NETXEN_PORT_MODE_AUTO_NEG_XG) { 376 } else if (port_mode == NETXEN_PORT_MODE_AUTO_NEG_XG) {
356 data = NETXEN_PORT_MODE_AUTO_NEG_XG; 377 data = NETXEN_PORT_MODE_AUTO_NEG_XG;
357 adapter->hw_write_wx(adapter, 378 NXWR32(adapter, NETXEN_PORT_MODE_ADDR, data);
358 NETXEN_PORT_MODE_ADDR, &data, 4);
359 } else { 379 } else {
360 data = NETXEN_PORT_MODE_AUTO_NEG; 380 data = NETXEN_PORT_MODE_AUTO_NEG;
361 adapter->hw_write_wx(adapter, 381 NXWR32(adapter, NETXEN_PORT_MODE_ADDR, data);
362 NETXEN_PORT_MODE_ADDR, &data, 4);
363 } 382 }
364 383
365 if ((wol_port_mode != NETXEN_PORT_MODE_802_3_AP) && 384 if ((wol_port_mode != NETXEN_PORT_MODE_802_3_AP) &&
@@ -368,8 +387,7 @@ static void netxen_set_port_mode(struct netxen_adapter *adapter)
368 (wol_port_mode != NETXEN_PORT_MODE_AUTO_NEG_XG)) { 387 (wol_port_mode != NETXEN_PORT_MODE_AUTO_NEG_XG)) {
369 wol_port_mode = NETXEN_PORT_MODE_AUTO_NEG; 388 wol_port_mode = NETXEN_PORT_MODE_AUTO_NEG;
370 } 389 }
371 adapter->hw_write_wx(adapter, NETXEN_WOL_PORT_MODE, 390 NXWR32(adapter, NETXEN_WOL_PORT_MODE, wol_port_mode);
372 &wol_port_mode, 4);
373 } 391 }
374} 392}
375 393
@@ -462,8 +480,6 @@ netxen_setup_intr(struct netxen_adapter *adapter)
462 struct pci_dev *pdev = adapter->pdev; 480 struct pci_dev *pdev = adapter->pdev;
463 481
464 adapter->flags &= ~(NETXEN_NIC_MSI_ENABLED | NETXEN_NIC_MSIX_ENABLED); 482 adapter->flags &= ~(NETXEN_NIC_MSI_ENABLED | NETXEN_NIC_MSIX_ENABLED);
465 adapter->intr_scheme = -1;
466 adapter->msi_mode = -1;
467 483
468 if (adapter->ahw.revision_id >= NX_P3_B0) 484 if (adapter->ahw.revision_id >= NX_P3_B0)
469 legacy_intrp = &legacy_intr[adapter->ahw.pci_func]; 485 legacy_intrp = &legacy_intr[adapter->ahw.pci_func];
@@ -552,8 +568,6 @@ netxen_setup_pci_map(struct netxen_adapter *adapter)
552 adapter->hw_read_wx = netxen_nic_hw_read_wx_128M; 568 adapter->hw_read_wx = netxen_nic_hw_read_wx_128M;
553 adapter->pci_read_immediate = netxen_nic_pci_read_immediate_128M; 569 adapter->pci_read_immediate = netxen_nic_pci_read_immediate_128M;
554 adapter->pci_write_immediate = netxen_nic_pci_write_immediate_128M; 570 adapter->pci_write_immediate = netxen_nic_pci_write_immediate_128M;
555 adapter->pci_read_normalize = netxen_nic_pci_read_normalize_128M;
556 adapter->pci_write_normalize = netxen_nic_pci_write_normalize_128M;
557 adapter->pci_set_window = netxen_nic_pci_set_window_128M; 571 adapter->pci_set_window = netxen_nic_pci_set_window_128M;
558 adapter->pci_mem_read = netxen_nic_pci_mem_read_128M; 572 adapter->pci_mem_read = netxen_nic_pci_mem_read_128M;
559 adapter->pci_mem_write = netxen_nic_pci_mem_write_128M; 573 adapter->pci_mem_write = netxen_nic_pci_mem_write_128M;
@@ -575,9 +589,6 @@ netxen_setup_pci_map(struct netxen_adapter *adapter)
575 adapter->pci_read_immediate = netxen_nic_pci_read_immediate_2M; 589 adapter->pci_read_immediate = netxen_nic_pci_read_immediate_2M;
576 adapter->pci_write_immediate = 590 adapter->pci_write_immediate =
577 netxen_nic_pci_write_immediate_2M; 591 netxen_nic_pci_write_immediate_2M;
578 adapter->pci_read_normalize = netxen_nic_pci_read_normalize_2M;
579 adapter->pci_write_normalize =
580 netxen_nic_pci_write_normalize_2M;
581 adapter->pci_set_window = netxen_nic_pci_set_window_2M; 592 adapter->pci_set_window = netxen_nic_pci_set_window_2M;
582 adapter->pci_mem_read = netxen_nic_pci_mem_read_2M; 593 adapter->pci_mem_read = netxen_nic_pci_mem_read_2M;
583 adapter->pci_mem_write = netxen_nic_pci_mem_write_2M; 594 adapter->pci_mem_write = netxen_nic_pci_mem_write_2M;
@@ -660,8 +671,7 @@ netxen_start_firmware(struct netxen_adapter *adapter)
660 if (!first_driver) 671 if (!first_driver)
661 return 0; 672 return 0;
662 673
663 first_boot = adapter->pci_read_normalize(adapter, 674 first_boot = NXRD32(adapter, NETXEN_CAM_RAM(0x1fc));
664 NETXEN_CAM_RAM(0x1fc));
665 675
666 err = netxen_check_hw_init(adapter, first_boot); 676 err = netxen_check_hw_init(adapter, first_boot);
667 if (err) { 677 if (err) {
@@ -670,13 +680,12 @@ netxen_start_firmware(struct netxen_adapter *adapter)
670 } 680 }
671 681
672 if (first_boot != 0x55555555) { 682 if (first_boot != 0x55555555) {
673 adapter->pci_write_normalize(adapter, 683 NXWR32(adapter, CRB_CMDPEG_STATE, 0);
674 CRB_CMDPEG_STATE, 0);
675 netxen_pinit_from_rom(adapter, 0); 684 netxen_pinit_from_rom(adapter, 0);
676 msleep(1); 685 msleep(1);
677 } 686 }
678 687
679 netxen_nic_reg_write(adapter, CRB_DMA_SHIFT, 0x55555555); 688 NXWR32(adapter, CRB_DMA_SHIFT, 0x55555555);
680 if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) 689 if (NX_IS_REVISION_P3(adapter->ahw.revision_id))
681 netxen_set_port_mode(adapter); 690 netxen_set_port_mode(adapter);
682 691
@@ -688,8 +697,7 @@ netxen_start_firmware(struct netxen_adapter *adapter)
688 val = 0x7654; 697 val = 0x7654;
689 if (adapter->ahw.port_type == NETXEN_NIC_XGBE) 698 if (adapter->ahw.port_type == NETXEN_NIC_XGBE)
690 val |= 0x0f000000; 699 val |= 0x0f000000;
691 netxen_crb_writelit_adapter(adapter, 700 NXWR32(adapter, NETXEN_MAC_ADDR_CNTL_REG, val);
692 NETXEN_MAC_ADDR_CNTL_REG, val);
693 701
694 } 702 }
695 703
@@ -703,7 +711,7 @@ netxen_start_firmware(struct netxen_adapter *adapter)
703 val = (_NETXEN_NIC_LINUX_MAJOR << 16) 711 val = (_NETXEN_NIC_LINUX_MAJOR << 16)
704 | ((_NETXEN_NIC_LINUX_MINOR << 8)) 712 | ((_NETXEN_NIC_LINUX_MINOR << 8))
705 | (_NETXEN_NIC_LINUX_SUBVERSION); 713 | (_NETXEN_NIC_LINUX_SUBVERSION);
706 adapter->pci_write_normalize(adapter, CRB_DRIVER_VERSION, val); 714 NXWR32(adapter, CRB_DRIVER_VERSION, val);
707 715
708 /* Handshake with the card before we register the devices. */ 716 /* Handshake with the card before we register the devices. */
709 err = netxen_phantom_init(adapter, NETXEN_NIC_PEG_TUNE); 717 err = netxen_phantom_init(adapter, NETXEN_NIC_PEG_TUNE);
@@ -726,15 +734,6 @@ netxen_nic_request_irq(struct netxen_adapter *adapter)
726 struct net_device *netdev = adapter->netdev; 734 struct net_device *netdev = adapter->netdev;
727 struct netxen_recv_context *recv_ctx = &adapter->recv_ctx; 735 struct netxen_recv_context *recv_ctx = &adapter->recv_ctx;
728 736
729 if ((adapter->msi_mode != MSI_MODE_MULTIFUNC) ||
730 (adapter->intr_scheme != INTR_SCHEME_PERPORT)) {
731 printk(KERN_ERR "%s: Firmware interrupt scheme is "
732 "incompatible with driver\n",
733 netdev->name);
734 adapter->driver_mismatch = 1;
735 return -EINVAL;
736 }
737
738 if (adapter->flags & NETXEN_NIC_MSIX_ENABLED) 737 if (adapter->flags & NETXEN_NIC_MSIX_ENABLED)
739 handler = netxen_msix_intr; 738 handler = netxen_msix_intr;
740 else if (adapter->flags & NETXEN_NIC_MSI_ENABLED) 739 else if (adapter->flags & NETXEN_NIC_MSI_ENABLED)
@@ -798,6 +797,9 @@ netxen_nic_up(struct netxen_adapter *adapter, struct net_device *netdev)
798 if (adapter->max_sds_rings > 1) 797 if (adapter->max_sds_rings > 1)
799 netxen_config_rss(adapter, 1); 798 netxen_config_rss(adapter, 1);
800 799
800 if (NX_IS_REVISION_P3(adapter->ahw.revision_id))
801 netxen_linkevent_request(adapter, 1);
802
801 return 0; 803 return 0;
802} 804}
803 805
@@ -825,6 +827,7 @@ netxen_nic_attach(struct netxen_adapter *adapter)
825 struct pci_dev *pdev = adapter->pdev; 827 struct pci_dev *pdev = adapter->pdev;
826 int err, ring; 828 int err, ring;
827 struct nx_host_rds_ring *rds_ring; 829 struct nx_host_rds_ring *rds_ring;
830 struct nx_host_tx_ring *tx_ring;
828 831
829 err = netxen_init_firmware(adapter); 832 err = netxen_init_firmware(adapter);
830 if (err != 0) { 833 if (err != 0) {
@@ -854,13 +857,12 @@ netxen_nic_attach(struct netxen_adapter *adapter)
854 } 857 }
855 858
856 if (adapter->fw_major < 4) { 859 if (adapter->fw_major < 4) {
857 adapter->crb_addr_cmd_producer = 860 tx_ring = &adapter->tx_ring;
858 crb_cmd_producer[adapter->portnum]; 861 tx_ring->crb_cmd_producer = crb_cmd_producer[adapter->portnum];
859 adapter->crb_addr_cmd_consumer = 862 tx_ring->crb_cmd_consumer = crb_cmd_consumer[adapter->portnum];
860 crb_cmd_consumer[adapter->portnum];
861 863
862 netxen_nic_update_cmd_producer(adapter, 0); 864 netxen_nic_update_cmd_producer(adapter, tx_ring, 0);
863 netxen_nic_update_cmd_consumer(adapter, 0); 865 netxen_nic_update_cmd_consumer(adapter, tx_ring, 0);
864 } 866 }
865 867
866 for (ring = 0; ring < adapter->max_rds_rings; ring++) { 868 for (ring = 0; ring < adapter->max_rds_rings; ring++) {
@@ -979,6 +981,7 @@ netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
979 SET_ETHTOOL_OPS(netdev, &netxen_nic_ethtool_ops); 981 SET_ETHTOOL_OPS(netdev, &netxen_nic_ethtool_ops);
980 982
981 netdev->features |= (NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO); 983 netdev->features |= (NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO);
984 netdev->features |= (NETIF_F_GRO);
982 netdev->vlan_features |= (NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO); 985 netdev->vlan_features |= (NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO);
983 986
984 if (NX_IS_REVISION_P3(revision_id)) { 987 if (NX_IS_REVISION_P3(revision_id)) {
@@ -1024,8 +1027,7 @@ netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1024 */ 1027 */
1025 adapter->physical_port = adapter->portnum; 1028 adapter->physical_port = adapter->portnum;
1026 if (adapter->fw_major < 4) { 1029 if (adapter->fw_major < 4) {
1027 i = adapter->pci_read_normalize(adapter, 1030 i = NXRD32(adapter, CRB_V2P(adapter->portnum));
1028 CRB_V2P(adapter->portnum));
1029 if (i != 0x55555555) 1031 if (i != 0x55555555)
1030 adapter->physical_port = i; 1032 adapter->physical_port = i;
1031 } 1033 }
@@ -1036,10 +1038,7 @@ netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1036 1038
1037 netdev->irq = adapter->msix_entries[0].vector; 1039 netdev->irq = adapter->msix_entries[0].vector;
1038 1040
1039 netxen_napi_add(adapter, netdev); 1041 if (netxen_napi_add(adapter, netdev))
1040
1041 err = netxen_receive_peg_ready(adapter);
1042 if (err)
1043 goto err_out_disable_msi; 1042 goto err_out_disable_msi;
1044 1043
1045 init_timer(&adapter->watchdog_timer); 1044 init_timer(&adapter->watchdog_timer);
@@ -1122,6 +1121,7 @@ static void __devexit netxen_nic_remove(struct pci_dev *pdev)
1122 netxen_free_adapter_offload(adapter); 1121 netxen_free_adapter_offload(adapter);
1123 1122
1124 netxen_teardown_intr(adapter); 1123 netxen_teardown_intr(adapter);
1124 netxen_free_sds_rings(&adapter->recv_ctx);
1125 1125
1126 netxen_cleanup_pci_map(adapter); 1126 netxen_cleanup_pci_map(adapter);
1127 1127
@@ -1315,7 +1315,7 @@ static int
1315netxen_nic_xmit_frame(struct sk_buff *skb, struct net_device *netdev) 1315netxen_nic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
1316{ 1316{
1317 struct netxen_adapter *adapter = netdev_priv(netdev); 1317 struct netxen_adapter *adapter = netdev_priv(netdev);
1318 struct netxen_hardware_context *hw = &adapter->ahw; 1318 struct nx_host_tx_ring *tx_ring = &adapter->tx_ring;
1319 unsigned int first_seg_len = skb->len - skb->data_len; 1319 unsigned int first_seg_len = skb->len - skb->data_len;
1320 struct netxen_cmd_buffer *pbuf; 1320 struct netxen_cmd_buffer *pbuf;
1321 struct netxen_skb_frag *buffrag; 1321 struct netxen_skb_frag *buffrag;
@@ -1326,28 +1326,26 @@ netxen_nic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
1326 1326
1327 u32 producer, consumer; 1327 u32 producer, consumer;
1328 int frag_count, no_of_desc; 1328 int frag_count, no_of_desc;
1329 u32 num_txd = adapter->num_txd; 1329 u32 num_txd = tx_ring->num_desc;
1330 bool is_tso = false; 1330 bool is_tso = false;
1331 1331
1332 frag_count = skb_shinfo(skb)->nr_frags + 1; 1332 frag_count = skb_shinfo(skb)->nr_frags + 1;
1333 1333
1334 /* There 4 fragments per descriptor */ 1334 /* 4 fragments per cmd des */
1335 no_of_desc = (frag_count + 3) >> 2; 1335 no_of_desc = (frag_count + 3) >> 2;
1336 1336
1337 producer = adapter->cmd_producer; 1337 producer = tx_ring->producer;
1338 smp_mb(); 1338 smp_mb();
1339 consumer = adapter->last_cmd_consumer; 1339 consumer = tx_ring->sw_consumer;
1340 if ((no_of_desc+2) > find_diff_among(producer, consumer, num_txd)) { 1340 if ((no_of_desc+2) > find_diff_among(producer, consumer, num_txd)) {
1341 netif_stop_queue(netdev); 1341 netif_stop_queue(netdev);
1342 smp_mb(); 1342 smp_mb();
1343 return NETDEV_TX_BUSY; 1343 return NETDEV_TX_BUSY;
1344 } 1344 }
1345 1345
1346 /* Copy the descriptors into the hardware */ 1346 hwdesc = &tx_ring->desc_head[producer];
1347 hwdesc = &hw->cmd_desc_head[producer];
1348 netxen_clear_cmddesc((u64 *)hwdesc); 1347 netxen_clear_cmddesc((u64 *)hwdesc);
1349 /* Take skb->data itself */ 1348 pbuf = &tx_ring->cmd_buf_arr[producer];
1350 pbuf = &adapter->cmd_buf_arr[producer];
1351 1349
1352 is_tso = netxen_tso_check(netdev, hwdesc, skb); 1350 is_tso = netxen_tso_check(netdev, hwdesc, skb);
1353 1351
@@ -1376,9 +1374,9 @@ netxen_nic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
1376 if ((i & 0x3) == 0) { 1374 if ((i & 0x3) == 0) {
1377 k = 0; 1375 k = 0;
1378 producer = get_next_index(producer, num_txd); 1376 producer = get_next_index(producer, num_txd);
1379 hwdesc = &hw->cmd_desc_head[producer]; 1377 hwdesc = &tx_ring->desc_head[producer];
1380 netxen_clear_cmddesc((u64 *)hwdesc); 1378 netxen_clear_cmddesc((u64 *)hwdesc);
1381 pbuf = &adapter->cmd_buf_arr[producer]; 1379 pbuf = &tx_ring->cmd_buf_arr[producer];
1382 pbuf->skb = NULL; 1380 pbuf->skb = NULL;
1383 } 1381 }
1384 frag = &skb_shinfo(skb)->frags[i - 1]; 1382 frag = &skb_shinfo(skb)->frags[i - 1];
@@ -1430,8 +1428,8 @@ netxen_nic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
1430 more_hdr = 0; 1428 more_hdr = 0;
1431 } 1429 }
1432 /* copy the MAC/IP/TCP headers to the cmd descriptor list */ 1430 /* copy the MAC/IP/TCP headers to the cmd descriptor list */
1433 hwdesc = &hw->cmd_desc_head[producer]; 1431 hwdesc = &tx_ring->desc_head[producer];
1434 pbuf = &adapter->cmd_buf_arr[producer]; 1432 pbuf = &tx_ring->cmd_buf_arr[producer];
1435 pbuf->skb = NULL; 1433 pbuf->skb = NULL;
1436 1434
1437 /* copy the first 64 bytes */ 1435 /* copy the first 64 bytes */
@@ -1440,8 +1438,8 @@ netxen_nic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
1440 producer = get_next_index(producer, num_txd); 1438 producer = get_next_index(producer, num_txd);
1441 1439
1442 if (more_hdr) { 1440 if (more_hdr) {
1443 hwdesc = &hw->cmd_desc_head[producer]; 1441 hwdesc = &tx_ring->desc_head[producer];
1444 pbuf = &adapter->cmd_buf_arr[producer]; 1442 pbuf = &tx_ring->cmd_buf_arr[producer];
1445 pbuf->skb = NULL; 1443 pbuf->skb = NULL;
1446 /* copy the next 64 bytes - should be enough except 1444 /* copy the next 64 bytes - should be enough except
1447 * for pathological case 1445 * for pathological case
@@ -1454,10 +1452,10 @@ netxen_nic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
1454 } 1452 }
1455 } 1453 }
1456 1454
1457 adapter->cmd_producer = producer; 1455 tx_ring->producer = producer;
1458 adapter->stats.txbytes += skb->len; 1456 adapter->stats.txbytes += skb->len;
1459 1457
1460 netxen_nic_update_cmd_producer(adapter, adapter->cmd_producer); 1458 netxen_nic_update_cmd_producer(adapter, tx_ring, producer);
1461 1459
1462 adapter->stats.xmitcalled++; 1460 adapter->stats.xmitcalled++;
1463 netdev->trans_start = jiffies; 1461 netdev->trans_start = jiffies;
@@ -1476,7 +1474,7 @@ static int netxen_nic_check_temp(struct netxen_adapter *adapter)
1476 uint32_t temp, temp_state, temp_val; 1474 uint32_t temp, temp_state, temp_val;
1477 int rv = 0; 1475 int rv = 0;
1478 1476
1479 temp = adapter->pci_read_normalize(adapter, CRB_TEMP_STATE); 1477 temp = NXRD32(adapter, CRB_TEMP_STATE);
1480 1478
1481 temp_state = nx_get_temp_state(temp); 1479 temp_state = nx_get_temp_state(temp);
1482 temp_val = nx_get_temp_val(temp); 1480 temp_val = nx_get_temp_val(temp);
@@ -1510,26 +1508,9 @@ static int netxen_nic_check_temp(struct netxen_adapter *adapter)
1510 return rv; 1508 return rv;
1511} 1509}
1512 1510
1513static void netxen_nic_handle_phy_intr(struct netxen_adapter *adapter) 1511void netxen_advert_link_change(struct netxen_adapter *adapter, int linkup)
1514{ 1512{
1515 struct net_device *netdev = adapter->netdev; 1513 struct net_device *netdev = adapter->netdev;
1516 u32 val, port, linkup;
1517
1518 port = adapter->physical_port;
1519
1520 if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) {
1521 val = adapter->pci_read_normalize(adapter, CRB_XG_STATE_P3);
1522 val = XG_LINK_STATE_P3(adapter->ahw.pci_func, val);
1523 linkup = (val == XG_LINK_UP_P3);
1524 } else {
1525 val = adapter->pci_read_normalize(adapter, CRB_XG_STATE);
1526 if (adapter->ahw.port_type == NETXEN_NIC_GBE)
1527 linkup = (val >> port) & 1;
1528 else {
1529 val = (val >> port*8) & 0xff;
1530 linkup = (val == XG_LINK_UP);
1531 }
1532 }
1533 1514
1534 if (adapter->ahw.linkup && !linkup) { 1515 if (adapter->ahw.linkup && !linkup) {
1535 printk(KERN_INFO "%s: %s NIC Link is down\n", 1516 printk(KERN_INFO "%s: %s NIC Link is down\n",
@@ -1540,7 +1521,9 @@ static void netxen_nic_handle_phy_intr(struct netxen_adapter *adapter)
1540 netif_stop_queue(netdev); 1521 netif_stop_queue(netdev);
1541 } 1522 }
1542 1523
1543 netxen_nic_set_link_parameters(adapter); 1524 if (!adapter->has_link_events)
1525 netxen_nic_set_link_parameters(adapter);
1526
1544 } else if (!adapter->ahw.linkup && linkup) { 1527 } else if (!adapter->ahw.linkup && linkup) {
1545 printk(KERN_INFO "%s: %s NIC Link is up\n", 1528 printk(KERN_INFO "%s: %s NIC Link is up\n",
1546 netxen_nic_driver_name, netdev->name); 1529 netxen_nic_driver_name, netdev->name);
@@ -1550,10 +1533,34 @@ static void netxen_nic_handle_phy_intr(struct netxen_adapter *adapter)
1550 netif_wake_queue(netdev); 1533 netif_wake_queue(netdev);
1551 } 1534 }
1552 1535
1553 netxen_nic_set_link_parameters(adapter); 1536 if (!adapter->has_link_events)
1537 netxen_nic_set_link_parameters(adapter);
1554 } 1538 }
1555} 1539}
1556 1540
1541static void netxen_nic_handle_phy_intr(struct netxen_adapter *adapter)
1542{
1543 u32 val, port, linkup;
1544
1545 port = adapter->physical_port;
1546
1547 if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) {
1548 val = NXRD32(adapter, CRB_XG_STATE_P3);
1549 val = XG_LINK_STATE_P3(adapter->ahw.pci_func, val);
1550 linkup = (val == XG_LINK_UP_P3);
1551 } else {
1552 val = NXRD32(adapter, CRB_XG_STATE);
1553 if (adapter->ahw.port_type == NETXEN_NIC_GBE)
1554 linkup = (val >> port) & 1;
1555 else {
1556 val = (val >> port*8) & 0xff;
1557 linkup = (val == XG_LINK_UP);
1558 }
1559 }
1560
1561 netxen_advert_link_change(adapter, linkup);
1562}
1563
1557static void netxen_watchdog(unsigned long v) 1564static void netxen_watchdog(unsigned long v)
1558{ 1565{
1559 struct netxen_adapter *adapter = (struct netxen_adapter *)v; 1566 struct netxen_adapter *adapter = (struct netxen_adapter *)v;
@@ -1569,7 +1576,8 @@ void netxen_watchdog_task(struct work_struct *work)
1569 if ((adapter->portnum == 0) && netxen_nic_check_temp(adapter)) 1576 if ((adapter->portnum == 0) && netxen_nic_check_temp(adapter))
1570 return; 1577 return;
1571 1578
1572 netxen_nic_handle_phy_intr(adapter); 1579 if (!adapter->has_link_events)
1580 netxen_nic_handle_phy_intr(adapter);
1573 1581
1574 if (netif_running(adapter->netdev)) 1582 if (netif_running(adapter->netdev))
1575 mod_timer(&adapter->watchdog_timer, jiffies + 2 * HZ); 1583 mod_timer(&adapter->watchdog_timer, jiffies + 2 * HZ);
@@ -1598,10 +1606,6 @@ static void netxen_tx_timeout_task(struct work_struct *work)
1598 netif_wake_queue(adapter->netdev); 1606 netif_wake_queue(adapter->netdev);
1599} 1607}
1600 1608
1601/*
1602 * netxen_nic_get_stats - Get System Network Statistics
1603 * @netdev: network interface device structure
1604 */
1605struct net_device_stats *netxen_nic_get_stats(struct net_device *netdev) 1609struct net_device_stats *netxen_nic_get_stats(struct net_device *netdev)
1606{ 1610{
1607 struct netxen_adapter *adapter = netdev_priv(netdev); 1611 struct netxen_adapter *adapter = netdev_priv(netdev);
@@ -1609,22 +1613,11 @@ struct net_device_stats *netxen_nic_get_stats(struct net_device *netdev)
1609 1613
1610 memset(stats, 0, sizeof(*stats)); 1614 memset(stats, 0, sizeof(*stats));
1611 1615
1612 /* total packets received */
1613 stats->rx_packets = adapter->stats.no_rcv; 1616 stats->rx_packets = adapter->stats.no_rcv;
1614 /* total packets transmitted */ 1617 stats->tx_packets = adapter->stats.xmitfinished;
1615 stats->tx_packets = adapter->stats.xmitedframes +
1616 adapter->stats.xmitfinished;
1617 /* total bytes received */
1618 stats->rx_bytes = adapter->stats.rxbytes; 1618 stats->rx_bytes = adapter->stats.rxbytes;
1619 /* total bytes transmitted */
1620 stats->tx_bytes = adapter->stats.txbytes; 1619 stats->tx_bytes = adapter->stats.txbytes;
1621 /* bad packets received */
1622 stats->rx_errors = adapter->stats.rcvdbadskb;
1623 /* packet transmit problems */
1624 stats->tx_errors = adapter->stats.nocmddescriptor;
1625 /* no space in linux buffers */
1626 stats->rx_dropped = adapter->stats.rxdropped; 1620 stats->rx_dropped = adapter->stats.rxdropped;
1627 /* no space available in linux */
1628 stats->tx_dropped = adapter->stats.txdropped; 1621 stats->tx_dropped = adapter->stats.txdropped;
1629 1622
1630 return stats; 1623 return stats;
@@ -1651,15 +1644,14 @@ static irqreturn_t netxen_intr(int irq, void *data)
1651 } else { 1644 } else {
1652 unsigned long our_int = 0; 1645 unsigned long our_int = 0;
1653 1646
1654 our_int = adapter->pci_read_normalize(adapter, CRB_INT_VECTOR); 1647 our_int = NXRD32(adapter, CRB_INT_VECTOR);
1655 1648
1656 /* not our interrupt */ 1649 /* not our interrupt */
1657 if (!test_and_clear_bit((7 + adapter->portnum), &our_int)) 1650 if (!test_and_clear_bit((7 + adapter->portnum), &our_int))
1658 return IRQ_NONE; 1651 return IRQ_NONE;
1659 1652
1660 /* claim interrupt */ 1653 /* claim interrupt */
1661 adapter->pci_write_normalize(adapter, 1654 NXWR32(adapter, CRB_INT_VECTOR, (our_int & 0xffffffff));
1662 CRB_INT_VECTOR, (our_int & 0xffffffff));
1663 } 1655 }
1664 1656
1665 /* clear interrupt */ 1657 /* clear interrupt */
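The netxen_nic_main.c hunks above fold every indirect register access (hw_read_wx()/hw_write_wx() with a buffer and a length, pci_read_normalize()/pci_write_normalize(), netxen_crb_writelit_adapter()) into the single NXRD32()/NXWR32() pair that takes and returns a plain 32-bit value. Below is a minimal sketch of that calling convention only; the struct layout and the crb_read/crb_write names are illustrative assumptions, not the driver's real definitions.

#include <stdint.h>

/* Hedged sketch: models the convention the patch converts to, where one
 * 32-bit read/write pair replaces the buffer-based accessors. */
struct nx_adapter_sketch {
	uint32_t (*crb_read)(struct nx_adapter_sketch *adapter,
			     unsigned long off);
	int (*crb_write)(struct nx_adapter_sketch *adapter,
			 unsigned long off, uint32_t val);
};

#define NXRD32(adapter, off)       ((adapter)->crb_read((adapter), (off)))
#define NXWR32(adapter, off, val)  ((adapter)->crb_write((adapter), (off), (val)))

/* Old style: u32 data = mode; adapter->hw_write_wx(adapter, reg, &data, 4);
 * New style: NXWR32(adapter, reg, mode);
 * One expression and no temporary, which is what lets the if/else chain in
 * netxen_set_port_mode() and the interrupt-claim path in netxen_intr()
 * shrink to single lines. */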
diff --git a/drivers/net/netxen/netxen_nic_niu.c b/drivers/net/netxen/netxen_nic_niu.c
index d85203203d4d..5e2698bf575a 100644
--- a/drivers/net/netxen/netxen_nic_niu.c
+++ b/drivers/net/netxen/netxen_nic_niu.c
@@ -43,8 +43,7 @@ static int phy_lock(struct netxen_adapter *adapter)
43 int done = 0, timeout = 0; 43 int done = 0, timeout = 0;
44 44
45 while (!done) { 45 while (!done) {
46 done = netxen_nic_reg_read(adapter, 46 done = NXRD32(adapter, NETXEN_PCIE_REG(PCIE_SEM3_LOCK));
47 NETXEN_PCIE_REG(PCIE_SEM3_LOCK));
48 if (done == 1) 47 if (done == 1)
49 break; 48 break;
50 if (timeout >= phy_lock_timeout) { 49 if (timeout >= phy_lock_timeout) {
@@ -59,8 +58,7 @@ static int phy_lock(struct netxen_adapter *adapter)
59 } 58 }
60 } 59 }
61 60
62 netxen_crb_writelit_adapter(adapter, 61 NXWR32(adapter, NETXEN_PHY_LOCK_ID, PHY_LOCK_DRIVER);
63 NETXEN_PHY_LOCK_ID, PHY_LOCK_DRIVER);
64 return 0; 62 return 0;
65} 63}
66 64
@@ -105,9 +103,7 @@ int netxen_niu_gbe_phy_read(struct netxen_adapter *adapter, long reg,
105 * so it cannot be in reset 103 * so it cannot be in reset
106 */ 104 */
107 105
108 if (adapter->hw_read_wx(adapter, NETXEN_NIU_GB_MAC_CONFIG_0(0), 106 mac_cfg0 = NXRD32(adapter, NETXEN_NIU_GB_MAC_CONFIG_0(0));
109 &mac_cfg0, 4))
110 return -EIO;
111 if (netxen_gb_get_soft_reset(mac_cfg0)) { 107 if (netxen_gb_get_soft_reset(mac_cfg0)) {
112 __u32 temp; 108 __u32 temp;
113 temp = 0; 109 temp = 0;
@@ -115,9 +111,7 @@ int netxen_niu_gbe_phy_read(struct netxen_adapter *adapter, long reg,
115 netxen_gb_rx_reset_pb(temp); 111 netxen_gb_rx_reset_pb(temp);
116 netxen_gb_tx_reset_mac(temp); 112 netxen_gb_tx_reset_mac(temp);
117 netxen_gb_rx_reset_mac(temp); 113 netxen_gb_rx_reset_mac(temp);
118 if (adapter->hw_write_wx(adapter, 114 if (NXWR32(adapter, NETXEN_NIU_GB_MAC_CONFIG_0(0), temp))
119 NETXEN_NIU_GB_MAC_CONFIG_0(0),
120 &temp, 4))
121 return -EIO; 115 return -EIO;
122 restore = 1; 116 restore = 1;
123 } 117 }
@@ -125,43 +119,32 @@ int netxen_niu_gbe_phy_read(struct netxen_adapter *adapter, long reg,
125 address = 0; 119 address = 0;
126 netxen_gb_mii_mgmt_reg_addr(address, reg); 120 netxen_gb_mii_mgmt_reg_addr(address, reg);
127 netxen_gb_mii_mgmt_phy_addr(address, phy); 121 netxen_gb_mii_mgmt_phy_addr(address, phy);
128 if (adapter->hw_write_wx(adapter, NETXEN_NIU_GB_MII_MGMT_ADDR(0), 122 if (NXWR32(adapter, NETXEN_NIU_GB_MII_MGMT_ADDR(0), address))
129 &address, 4))
130 return -EIO; 123 return -EIO;
131 command = 0; /* turn off any prior activity */ 124 command = 0; /* turn off any prior activity */
132 if (adapter->hw_write_wx(adapter, NETXEN_NIU_GB_MII_MGMT_COMMAND(0), 125 if (NXWR32(adapter, NETXEN_NIU_GB_MII_MGMT_COMMAND(0), command))
133 &command, 4))
134 return -EIO; 126 return -EIO;
135 /* send read command */ 127 /* send read command */
136 netxen_gb_mii_mgmt_set_read_cycle(command); 128 netxen_gb_mii_mgmt_set_read_cycle(command);
137 if (adapter->hw_write_wx(adapter, NETXEN_NIU_GB_MII_MGMT_COMMAND(0), 129 if (NXWR32(adapter, NETXEN_NIU_GB_MII_MGMT_COMMAND(0), command))
138 &command, 4))
139 return -EIO; 130 return -EIO;
140 131
141 status = 0; 132 status = 0;
142 do { 133 do {
143 if (adapter->hw_read_wx(adapter, 134 status = NXRD32(adapter, NETXEN_NIU_GB_MII_MGMT_INDICATE(0));
144 NETXEN_NIU_GB_MII_MGMT_INDICATE(0),
145 &status, 4))
146 return -EIO;
147 timeout++; 135 timeout++;
148 } while ((netxen_get_gb_mii_mgmt_busy(status) 136 } while ((netxen_get_gb_mii_mgmt_busy(status)
149 || netxen_get_gb_mii_mgmt_notvalid(status)) 137 || netxen_get_gb_mii_mgmt_notvalid(status))
150 && (timeout++ < NETXEN_NIU_PHY_WAITMAX)); 138 && (timeout++ < NETXEN_NIU_PHY_WAITMAX));
151 139
152 if (timeout < NETXEN_NIU_PHY_WAITMAX) { 140 if (timeout < NETXEN_NIU_PHY_WAITMAX) {
153 if (adapter->hw_read_wx(adapter, 141 *readval = NXRD32(adapter, NETXEN_NIU_GB_MII_MGMT_STATUS(0));
154 NETXEN_NIU_GB_MII_MGMT_STATUS(0),
155 readval, 4))
156 return -EIO;
157 result = 0; 142 result = 0;
158 } else 143 } else
159 result = -1; 144 result = -1;
160 145
161 if (restore) 146 if (restore)
162 if (adapter->hw_write_wx(adapter, 147 if (NXWR32(adapter, NETXEN_NIU_GB_MAC_CONFIG_0(0), mac_cfg0))
163 NETXEN_NIU_GB_MAC_CONFIG_0(0),
164 &mac_cfg0, 4))
165 return -EIO; 148 return -EIO;
166 phy_unlock(adapter); 149 phy_unlock(adapter);
167 return result; 150 return result;
@@ -197,9 +180,7 @@ int netxen_niu_gbe_phy_write(struct netxen_adapter *adapter, long reg,
197 * cannot be in reset 180 * cannot be in reset
198 */ 181 */
199 182
200 if (adapter->hw_read_wx(adapter, NETXEN_NIU_GB_MAC_CONFIG_0(0), 183 mac_cfg0 = NXRD32(adapter, NETXEN_NIU_GB_MAC_CONFIG_0(0));
201 &mac_cfg0, 4))
202 return -EIO;
203 if (netxen_gb_get_soft_reset(mac_cfg0)) { 184 if (netxen_gb_get_soft_reset(mac_cfg0)) {
204 __u32 temp; 185 __u32 temp;
205 temp = 0; 186 temp = 0;
@@ -208,35 +189,27 @@ int netxen_niu_gbe_phy_write(struct netxen_adapter *adapter, long reg,
208 netxen_gb_tx_reset_mac(temp); 189 netxen_gb_tx_reset_mac(temp);
209 netxen_gb_rx_reset_mac(temp); 190 netxen_gb_rx_reset_mac(temp);
210 191
211 if (adapter->hw_write_wx(adapter, 192 if (NXWR32(adapter, NETXEN_NIU_GB_MAC_CONFIG_0(0), temp))
212 NETXEN_NIU_GB_MAC_CONFIG_0(0),
213 &temp, 4))
214 return -EIO; 193 return -EIO;
215 restore = 1; 194 restore = 1;
216 } 195 }
217 196
218 command = 0; /* turn off any prior activity */ 197 command = 0; /* turn off any prior activity */
219 if (adapter->hw_write_wx(adapter, NETXEN_NIU_GB_MII_MGMT_COMMAND(0), 198 if (NXWR32(adapter, NETXEN_NIU_GB_MII_MGMT_COMMAND(0), command))
220 &command, 4))
221 return -EIO; 199 return -EIO;
222 200
223 address = 0; 201 address = 0;
224 netxen_gb_mii_mgmt_reg_addr(address, reg); 202 netxen_gb_mii_mgmt_reg_addr(address, reg);
225 netxen_gb_mii_mgmt_phy_addr(address, phy); 203 netxen_gb_mii_mgmt_phy_addr(address, phy);
226 if (adapter->hw_write_wx(adapter, NETXEN_NIU_GB_MII_MGMT_ADDR(0), 204 if (NXWR32(adapter, NETXEN_NIU_GB_MII_MGMT_ADDR(0), address))
227 &address, 4))
228 return -EIO; 205 return -EIO;
229 206
230 if (adapter->hw_write_wx(adapter, NETXEN_NIU_GB_MII_MGMT_CTRL(0), 207 if (NXWR32(adapter, NETXEN_NIU_GB_MII_MGMT_CTRL(0), val))
231 &val, 4))
232 return -EIO; 208 return -EIO;
233 209
234 status = 0; 210 status = 0;
235 do { 211 do {
236 if (adapter->hw_read_wx(adapter, 212 status = NXRD32(adapter, NETXEN_NIU_GB_MII_MGMT_INDICATE(0));
237 NETXEN_NIU_GB_MII_MGMT_INDICATE(0),
238 &status, 4))
239 return -EIO;
240 timeout++; 213 timeout++;
241 } while ((netxen_get_gb_mii_mgmt_busy(status)) 214 } while ((netxen_get_gb_mii_mgmt_busy(status))
242 && (timeout++ < NETXEN_NIU_PHY_WAITMAX)); 215 && (timeout++ < NETXEN_NIU_PHY_WAITMAX));
@@ -248,9 +221,7 @@ int netxen_niu_gbe_phy_write(struct netxen_adapter *adapter, long reg,
248 221
249 /* restore the state of port 0 MAC in case we tampered with it */ 222 /* restore the state of port 0 MAC in case we tampered with it */
250 if (restore) 223 if (restore)
251 if (adapter->hw_write_wx(adapter, 224 if (NXWR32(adapter, NETXEN_NIU_GB_MAC_CONFIG_0(0), mac_cfg0))
252 NETXEN_NIU_GB_MAC_CONFIG_0(0),
253 &mac_cfg0, 4))
254 return -EIO; 225 return -EIO;
255 226
256 return result; 227 return result;
@@ -258,7 +229,7 @@ int netxen_niu_gbe_phy_write(struct netxen_adapter *adapter, long reg,
258 229
259int netxen_niu_xgbe_enable_phy_interrupts(struct netxen_adapter *adapter) 230int netxen_niu_xgbe_enable_phy_interrupts(struct netxen_adapter *adapter)
260{ 231{
261 netxen_crb_writelit_adapter(adapter, NETXEN_NIU_INT_MASK, 0x3f); 232 NXWR32(adapter, NETXEN_NIU_INT_MASK, 0x3f);
262 return 0; 233 return 0;
263} 234}
264 235
@@ -281,7 +252,7 @@ int netxen_niu_gbe_enable_phy_interrupts(struct netxen_adapter *adapter)
281 252
282int netxen_niu_xgbe_disable_phy_interrupts(struct netxen_adapter *adapter) 253int netxen_niu_xgbe_disable_phy_interrupts(struct netxen_adapter *adapter)
283{ 254{
284 netxen_crb_writelit_adapter(adapter, NETXEN_NIU_INT_MASK, 0x7f); 255 NXWR32(adapter, NETXEN_NIU_INT_MASK, 0x7f);
285 return 0; 256 return 0;
286} 257}
287 258
@@ -315,36 +286,27 @@ static int netxen_niu_gbe_clear_phy_interrupts(struct netxen_adapter *adapter)
315static void netxen_niu_gbe_set_mii_mode(struct netxen_adapter *adapter, 286static void netxen_niu_gbe_set_mii_mode(struct netxen_adapter *adapter,
316 int port, long enable) 287 int port, long enable)
317{ 288{
318 netxen_crb_writelit_adapter(adapter, NETXEN_NIU_MODE, 0x2); 289 NXWR32(adapter, NETXEN_NIU_MODE, 0x2);
319 netxen_crb_writelit_adapter(adapter, NETXEN_NIU_GB_MAC_CONFIG_0(port), 290 NXWR32(adapter, NETXEN_NIU_GB_MAC_CONFIG_0(port), 0x80000000);
320 0x80000000); 291 NXWR32(adapter, NETXEN_NIU_GB_MAC_CONFIG_0(port), 0x0000f0025);
321 netxen_crb_writelit_adapter(adapter, NETXEN_NIU_GB_MAC_CONFIG_0(port), 292 NXWR32(adapter, NETXEN_NIU_GB_MAC_CONFIG_1(port), 0xf1ff);
322 0x0000f0025); 293 NXWR32(adapter, NETXEN_NIU_GB0_GMII_MODE + (port << 3), 0);
323 netxen_crb_writelit_adapter(adapter, NETXEN_NIU_GB_MAC_CONFIG_1(port), 294 NXWR32(adapter, NETXEN_NIU_GB0_MII_MODE + (port << 3), 1);
324 0xf1ff); 295 NXWR32(adapter, (NETXEN_NIU_GB0_HALF_DUPLEX + port * 4), 0);
325 netxen_crb_writelit_adapter(adapter, 296 NXWR32(adapter, NETXEN_NIU_GB_MII_MGMT_CONFIG(port), 0x7);
326 NETXEN_NIU_GB0_GMII_MODE + (port << 3), 0);
327 netxen_crb_writelit_adapter(adapter,
328 NETXEN_NIU_GB0_MII_MODE + (port << 3), 1);
329 netxen_crb_writelit_adapter(adapter,
330 (NETXEN_NIU_GB0_HALF_DUPLEX + port * 4), 0);
331 netxen_crb_writelit_adapter(adapter,
332 NETXEN_NIU_GB_MII_MGMT_CONFIG(port), 0x7);
333 297
334 if (enable) { 298 if (enable) {
335 /* 299 /*
336 * Do NOT enable flow control until a suitable solution for 300 * Do NOT enable flow control until a suitable solution for
337 * shutting down pause frames is found. 301 * shutting down pause frames is found.
338 */ 302 */
339 netxen_crb_writelit_adapter(adapter, 303 NXWR32(adapter, NETXEN_NIU_GB_MAC_CONFIG_0(port), 0x5);
340 NETXEN_NIU_GB_MAC_CONFIG_0(port),
341 0x5);
342 } 304 }
343 305
344 if (netxen_niu_gbe_enable_phy_interrupts(adapter)) 306 if (netxen_niu_gbe_enable_phy_interrupts(adapter))
345 printk(KERN_ERR PFX "ERROR enabling PHY interrupts\n"); 307 printk(KERN_ERR "ERROR enabling PHY interrupts\n");
346 if (netxen_niu_gbe_clear_phy_interrupts(adapter)) 308 if (netxen_niu_gbe_clear_phy_interrupts(adapter))
347 printk(KERN_ERR PFX "ERROR clearing PHY interrupts\n"); 309 printk(KERN_ERR "ERROR clearing PHY interrupts\n");
348} 310}
349 311
350/* 312/*
@@ -353,36 +315,27 @@ static void netxen_niu_gbe_set_mii_mode(struct netxen_adapter *adapter,
353static void netxen_niu_gbe_set_gmii_mode(struct netxen_adapter *adapter, 315static void netxen_niu_gbe_set_gmii_mode(struct netxen_adapter *adapter,
354 int port, long enable) 316 int port, long enable)
355{ 317{
356 netxen_crb_writelit_adapter(adapter, NETXEN_NIU_MODE, 0x2); 318 NXWR32(adapter, NETXEN_NIU_MODE, 0x2);
357 netxen_crb_writelit_adapter(adapter, NETXEN_NIU_GB_MAC_CONFIG_0(port), 319 NXWR32(adapter, NETXEN_NIU_GB_MAC_CONFIG_0(port), 0x80000000);
358 0x80000000); 320 NXWR32(adapter, NETXEN_NIU_GB_MAC_CONFIG_0(port), 0x0000f0025);
359 netxen_crb_writelit_adapter(adapter, NETXEN_NIU_GB_MAC_CONFIG_0(port), 321 NXWR32(adapter, NETXEN_NIU_GB_MAC_CONFIG_1(port), 0xf2ff);
360 0x0000f0025); 322 NXWR32(adapter, NETXEN_NIU_GB0_MII_MODE + (port << 3), 0);
361 netxen_crb_writelit_adapter(adapter, NETXEN_NIU_GB_MAC_CONFIG_1(port), 323 NXWR32(adapter, NETXEN_NIU_GB0_GMII_MODE + (port << 3), 1);
362 0xf2ff); 324 NXWR32(adapter, (NETXEN_NIU_GB0_HALF_DUPLEX + port * 4), 0);
363 netxen_crb_writelit_adapter(adapter, 325 NXWR32(adapter, NETXEN_NIU_GB_MII_MGMT_CONFIG(port), 0x7);
364 NETXEN_NIU_GB0_MII_MODE + (port << 3), 0);
365 netxen_crb_writelit_adapter(adapter,
366 NETXEN_NIU_GB0_GMII_MODE + (port << 3), 1);
367 netxen_crb_writelit_adapter(adapter,
368 (NETXEN_NIU_GB0_HALF_DUPLEX + port * 4), 0);
369 netxen_crb_writelit_adapter(adapter,
370 NETXEN_NIU_GB_MII_MGMT_CONFIG(port), 0x7);
371 326
372 if (enable) { 327 if (enable) {
373 /* 328 /*
374 * Do NOT enable flow control until a suitable solution for 329 * Do NOT enable flow control until a suitable solution for
375 * shutting down pause frames is found. 330 * shutting down pause frames is found.
376 */ 331 */
377 netxen_crb_writelit_adapter(adapter, 332 NXWR32(adapter, NETXEN_NIU_GB_MAC_CONFIG_0(port), 0x5);
378 NETXEN_NIU_GB_MAC_CONFIG_0(port),
379 0x5);
380 } 333 }
381 334
382 if (netxen_niu_gbe_enable_phy_interrupts(adapter)) 335 if (netxen_niu_gbe_enable_phy_interrupts(adapter))
383 printk(KERN_ERR PFX "ERROR enabling PHY interrupts\n"); 336 printk(KERN_ERR "ERROR enabling PHY interrupts\n");
384 if (netxen_niu_gbe_clear_phy_interrupts(adapter)) 337 if (netxen_niu_gbe_clear_phy_interrupts(adapter))
385 printk(KERN_ERR PFX "ERROR clearing PHY interrupts\n"); 338 printk(KERN_ERR "ERROR clearing PHY interrupts\n");
386} 339}
387 340
388int netxen_niu_gbe_init_port(struct netxen_adapter *adapter, int port) 341int netxen_niu_gbe_init_port(struct netxen_adapter *adapter, int port)
@@ -416,25 +369,20 @@ int netxen_niu_gbe_init_port(struct netxen_adapter *adapter, int port)
416 * plugged in. 369 * plugged in.
417 */ 370 */
418 371
419 netxen_crb_writelit_adapter(adapter, 372 NXWR32(adapter, NETXEN_NIU_GB_MAC_CONFIG_0(port),
420 NETXEN_NIU_GB_MAC_CONFIG_0
421 (port),
422 NETXEN_GB_MAC_SOFT_RESET); 373 NETXEN_GB_MAC_SOFT_RESET);
423 netxen_crb_writelit_adapter(adapter, 374 NXWR32(adapter, NETXEN_NIU_GB_MAC_CONFIG_0(port),
424 NETXEN_NIU_GB_MAC_CONFIG_0 375 NETXEN_GB_MAC_RESET_PROT_BLK |
425 (port), 376 NETXEN_GB_MAC_ENABLE_TX_RX |
426 NETXEN_GB_MAC_RESET_PROT_BLK 377 NETXEN_GB_MAC_PAUSED_FRMS);
427 | NETXEN_GB_MAC_ENABLE_TX_RX
428 |
429 NETXEN_GB_MAC_PAUSED_FRMS);
430 if (netxen_niu_gbe_clear_phy_interrupts(adapter)) 378 if (netxen_niu_gbe_clear_phy_interrupts(adapter))
431 printk(KERN_ERR PFX 379 printk(KERN_ERR
432 "ERROR clearing PHY interrupts\n"); 380 "ERROR clearing PHY interrupts\n");
433 if (netxen_niu_gbe_enable_phy_interrupts(adapter)) 381 if (netxen_niu_gbe_enable_phy_interrupts(adapter))
434 printk(KERN_ERR PFX 382 printk(KERN_ERR
435 "ERROR enabling PHY interrupts\n"); 383 "ERROR enabling PHY interrupts\n");
436 if (netxen_niu_gbe_clear_phy_interrupts(adapter)) 384 if (netxen_niu_gbe_clear_phy_interrupts(adapter))
437 printk(KERN_ERR PFX 385 printk(KERN_ERR
438 "ERROR clearing PHY interrupts\n"); 386 "ERROR clearing PHY interrupts\n");
439 result = -1; 387 result = -1;
440 } 388 }
@@ -447,10 +395,8 @@ int netxen_niu_gbe_init_port(struct netxen_adapter *adapter, int port)
447int netxen_niu_xg_init_port(struct netxen_adapter *adapter, int port) 395int netxen_niu_xg_init_port(struct netxen_adapter *adapter, int port)
448{ 396{
449 if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) { 397 if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) {
450 netxen_crb_writelit_adapter(adapter, 398 NXWR32(adapter, NETXEN_NIU_XGE_CONFIG_1+(0x10000*port), 0x1447);
451 NETXEN_NIU_XGE_CONFIG_1+(0x10000*port), 0x1447); 399 NXWR32(adapter, NETXEN_NIU_XGE_CONFIG_0+(0x10000*port), 0x5);
452 netxen_crb_writelit_adapter(adapter,
453 NETXEN_NIU_XGE_CONFIG_0+(0x10000*port), 0x5);
454 } 400 }
455 401
456 return 0; 402 return 0;
@@ -473,12 +419,8 @@ static int netxen_niu_macaddr_get(struct netxen_adapter *adapter,
473 if ((phy < 0) || (phy > 3)) 419 if ((phy < 0) || (phy > 3))
474 return -EINVAL; 420 return -EINVAL;
475 421
476 if (adapter->hw_read_wx(adapter, NETXEN_NIU_GB_STATION_ADDR_0(phy), 422 stationhigh = NXRD32(adapter, NETXEN_NIU_GB_STATION_ADDR_0(phy));
477 &stationhigh, 4)) 423 stationlow = NXRD32(adapter, NETXEN_NIU_GB_STATION_ADDR_1(phy));
478 return -EIO;
479 if (adapter->hw_read_wx(adapter, NETXEN_NIU_GB_STATION_ADDR_1(phy),
480 &stationlow, 4))
481 return -EIO;
482 ((__le32 *)val)[1] = cpu_to_le32(stationhigh); 424 ((__le32 *)val)[1] = cpu_to_le32(stationhigh);
483 ((__le32 *)val)[0] = cpu_to_le32(stationlow); 425 ((__le32 *)val)[0] = cpu_to_le32(stationlow);
484 426
@@ -507,14 +449,12 @@ int netxen_niu_macaddr_set(struct netxen_adapter *adapter,
507 temp[0] = temp[1] = 0; 449 temp[0] = temp[1] = 0;
508 memcpy(temp + 2, addr, 2); 450 memcpy(temp + 2, addr, 2);
509 val = le32_to_cpu(*(__le32 *)temp); 451 val = le32_to_cpu(*(__le32 *)temp);
510 if (adapter->hw_write_wx(adapter, 452 if (NXWR32(adapter, NETXEN_NIU_GB_STATION_ADDR_1(phy), val))
511 NETXEN_NIU_GB_STATION_ADDR_1(phy), &val, 4))
512 return -EIO; 453 return -EIO;
513 454
514 memcpy(temp, ((u8 *) addr) + 2, sizeof(__le32)); 455 memcpy(temp, ((u8 *) addr) + 2, sizeof(__le32));
515 val = le32_to_cpu(*(__le32 *)temp); 456 val = le32_to_cpu(*(__le32 *)temp);
516 if (adapter->hw_write_wx(adapter, 457 if (NXWR32(adapter, NETXEN_NIU_GB_STATION_ADDR_0(phy), val))
517 NETXEN_NIU_GB_STATION_ADDR_0(phy), &val, 4))
518 return -2; 458 return -2;
519 459
520 netxen_niu_macaddr_get(adapter, 460 netxen_niu_macaddr_get(adapter,
@@ -545,8 +485,7 @@ int netxen_niu_disable_gbe_port(struct netxen_adapter *adapter)
545 return -EINVAL; 485 return -EINVAL;
546 mac_cfg0 = 0; 486 mac_cfg0 = 0;
547 netxen_gb_soft_reset(mac_cfg0); 487 netxen_gb_soft_reset(mac_cfg0);
548 if (adapter->hw_write_wx(adapter, NETXEN_NIU_GB_MAC_CONFIG_0(port), 488 if (NXWR32(adapter, NETXEN_NIU_GB_MAC_CONFIG_0(port), mac_cfg0))
549 &mac_cfg0, 4))
550 return -EIO; 489 return -EIO;
551 return 0; 490 return 0;
552} 491}
@@ -564,8 +503,8 @@ int netxen_niu_disable_xg_port(struct netxen_adapter *adapter)
564 return -EINVAL; 503 return -EINVAL;
565 504
566 mac_cfg = 0; 505 mac_cfg = 0;
567 if (adapter->hw_write_wx(adapter, 506 if (NXWR32(adapter,
568 NETXEN_NIU_XGE_CONFIG_0 + (0x10000 * port), &mac_cfg, 4)) 507 NETXEN_NIU_XGE_CONFIG_0 + (0x10000 * port), mac_cfg))
569 return -EIO; 508 return -EIO;
570 return 0; 509 return 0;
571} 510}
@@ -581,9 +520,7 @@ int netxen_niu_set_promiscuous_mode(struct netxen_adapter *adapter,
581 return -EINVAL; 520 return -EINVAL;
582 521
583 /* save previous contents */ 522 /* save previous contents */
584 if (adapter->hw_read_wx(adapter, NETXEN_NIU_GB_DROP_WRONGADDR, 523 reg = NXRD32(adapter, NETXEN_NIU_GB_DROP_WRONGADDR);
585 &reg, 4))
586 return -EIO;
587 if (mode == NETXEN_NIU_PROMISC_MODE) { 524 if (mode == NETXEN_NIU_PROMISC_MODE) {
588 switch (port) { 525 switch (port) {
589 case 0: 526 case 0:
@@ -619,8 +556,7 @@ int netxen_niu_set_promiscuous_mode(struct netxen_adapter *adapter,
619 return -EIO; 556 return -EIO;
620 } 557 }
621 } 558 }
622 if (adapter->hw_write_wx(adapter, NETXEN_NIU_GB_DROP_WRONGADDR, 559 if (NXWR32(adapter, NETXEN_NIU_GB_DROP_WRONGADDR, reg))
623 &reg, 4))
624 return -EIO; 560 return -EIO;
625 return 0; 561 return 0;
626} 562}
@@ -647,28 +583,24 @@ int netxen_niu_xg_macaddr_set(struct netxen_adapter *adapter,
647 case 0: 583 case 0:
648 memcpy(temp + 2, addr, 2); 584 memcpy(temp + 2, addr, 2);
649 val = le32_to_cpu(*(__le32 *)temp); 585 val = le32_to_cpu(*(__le32 *)temp);
650 if (adapter->hw_write_wx(adapter, NETXEN_NIU_XGE_STATION_ADDR_0_1, 586 if (NXWR32(adapter, NETXEN_NIU_XGE_STATION_ADDR_0_1, val))
651 &val, 4))
652 return -EIO; 587 return -EIO;
653 588
654 memcpy(&temp, ((u8 *) addr) + 2, sizeof(__le32)); 589 memcpy(&temp, ((u8 *) addr) + 2, sizeof(__le32));
655 val = le32_to_cpu(*(__le32 *)temp); 590 val = le32_to_cpu(*(__le32 *)temp);
656 if (adapter->hw_write_wx(adapter, NETXEN_NIU_XGE_STATION_ADDR_0_HI, 591 if (NXWR32(adapter, NETXEN_NIU_XGE_STATION_ADDR_0_HI, val))
657 &val, 4))
658 return -EIO; 592 return -EIO;
659 break; 593 break;
660 594
661 case 1: 595 case 1:
662 memcpy(temp + 2, addr, 2); 596 memcpy(temp + 2, addr, 2);
663 val = le32_to_cpu(*(__le32 *)temp); 597 val = le32_to_cpu(*(__le32 *)temp);
664 if (adapter->hw_write_wx(adapter, NETXEN_NIU_XG1_STATION_ADDR_0_1, 598 if (NXWR32(adapter, NETXEN_NIU_XG1_STATION_ADDR_0_1, val))
665 &val, 4))
666 return -EIO; 599 return -EIO;
667 600
668 memcpy(&temp, ((u8 *) addr) + 2, sizeof(__le32)); 601 memcpy(&temp, ((u8 *) addr) + 2, sizeof(__le32));
669 val = le32_to_cpu(*(__le32 *)temp); 602 val = le32_to_cpu(*(__le32 *)temp);
670 if (adapter->hw_write_wx(adapter, NETXEN_NIU_XG1_STATION_ADDR_0_HI, 603 if (NXWR32(adapter, NETXEN_NIU_XG1_STATION_ADDR_0_HI, val))
671 &val, 4))
672 return -EIO; 604 return -EIO;
673 break; 605 break;
674 606
@@ -689,9 +621,7 @@ int netxen_niu_xg_set_promiscuous_mode(struct netxen_adapter *adapter,
689 if (port > NETXEN_NIU_MAX_XG_PORTS) 621 if (port > NETXEN_NIU_MAX_XG_PORTS)
690 return -EINVAL; 622 return -EINVAL;
691 623
692 if (adapter->hw_read_wx(adapter, 624 reg = NXRD32(adapter, NETXEN_NIU_XGE_CONFIG_1 + (0x10000 * port));
693 NETXEN_NIU_XGE_CONFIG_1 + (0x10000 * port), &reg, 4))
694 return -EIO;
695 if (mode == NETXEN_NIU_PROMISC_MODE) 625 if (mode == NETXEN_NIU_PROMISC_MODE)
696 reg = (reg | 0x2000UL); 626 reg = (reg | 0x2000UL);
697 else 627 else
@@ -702,8 +632,7 @@ int netxen_niu_xg_set_promiscuous_mode(struct netxen_adapter *adapter,
702 else 632 else
703 reg = (reg & ~0x1000UL); 633 reg = (reg & ~0x1000UL);
704 634
705 netxen_crb_writelit_adapter(adapter, 635 NXWR32(adapter, NETXEN_NIU_XGE_CONFIG_1 + (0x10000 * port), reg);
706 NETXEN_NIU_XGE_CONFIG_1 + (0x10000 * port), reg);
707 636
708 return 0; 637 return 0;
709} 638}
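netxen_niu_gbe_phy_read() and netxen_niu_gbe_phy_write() above keep their original structure: program the MII management address, issue a command, spin on the MGMT_INDICATE register until the busy (and, for reads, not-valid) bits clear or NETXEN_NIU_PHY_WAITMAX expires, then latch MGMT_STATUS. The sketch below shows that poll-with-timeout shape in generic form; the register arguments, bit masks and ops struct are placeholders, not NetXen definitions.

#include <stdint.h>

#define EX_PHY_WAITMAX  8       /* stand-in for NETXEN_NIU_PHY_WAITMAX */
#define EX_MII_BUSY     0x1     /* assumed busy bit in the indicate register */

struct ex_mdio_ops {
	uint32_t (*rd)(void *hw, unsigned long off);
	void (*wr)(void *hw, unsigned long off, uint32_t val);
};

static int ex_mdio_read(const struct ex_mdio_ops *ops, void *hw,
			unsigned long addr_reg, unsigned long cmd_reg,
			unsigned long ind_reg, unsigned long sts_reg,
			uint32_t address, uint32_t *out)
{
	int tries;

	ops->wr(hw, addr_reg, address); /* phy number + register number */
	ops->wr(hw, cmd_reg, 0);        /* cancel any prior activity */
	ops->wr(hw, cmd_reg, 0x1);      /* start a read cycle (bit assumed) */

	for (tries = 0; tries < EX_PHY_WAITMAX; tries++) {
		if (!(ops->rd(hw, ind_reg) & EX_MII_BUSY)) {
			*out = ops->rd(hw, sts_reg); /* result now valid */
			return 0;
		}
	}
	return -1; /* mirrors the result = -1 timeout path above */
}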
diff --git a/drivers/net/netxen/netxen_nic_phan_reg.h b/drivers/net/netxen/netxen_nic_phan_reg.h
index 50183335e43a..845dcf436cf6 100644
--- a/drivers/net/netxen/netxen_nic_phan_reg.h
+++ b/drivers/net/netxen/netxen_nic_phan_reg.h
@@ -36,23 +36,25 @@
36 */ 36 */
37#define NIC_CRB_BASE NETXEN_CAM_RAM(0x200) 37#define NIC_CRB_BASE NETXEN_CAM_RAM(0x200)
38#define NETXEN_NIC_REG(X) (NIC_CRB_BASE+(X)) 38#define NETXEN_NIC_REG(X) (NIC_CRB_BASE+(X))
39#define NIC_CRB_BASE_2 NETXEN_CAM_RAM(0x700)
40#define NETXEN_NIC_REG_2(X) (NIC_CRB_BASE_2+(X))
39 41
40#define CRB_PHAN_CNTRL_LO_OFFSET NETXEN_NIC_REG(0x00) 42#define CRB_PHAN_CNTRL_LO_OFFSET NETXEN_NIC_REG(0x00)
41#define CRB_PHAN_CNTRL_HI_OFFSET NETXEN_NIC_REG(0x04) 43#define CRB_PHAN_CNTRL_HI_OFFSET NETXEN_NIC_REG(0x04)
42#define CRB_CMD_PRODUCER_OFFSET NETXEN_NIC_REG(0x08) 44#define CRB_CMD_PRODUCER_OFFSET NETXEN_NIC_REG(0x08)
43#define CRB_CMD_CONSUMER_OFFSET NETXEN_NIC_REG(0x0c) 45#define CRB_CMD_CONSUMER_OFFSET NETXEN_NIC_REG(0x0c)
44#define CRB_PAUSE_ADDR_LO NETXEN_NIC_REG(0x10) /* C0 EPG BUG */ 46#define CRB_PAUSE_ADDR_LO NETXEN_NIC_REG(0x10)
45#define CRB_PAUSE_ADDR_HI NETXEN_NIC_REG(0x14) 47#define CRB_PAUSE_ADDR_HI NETXEN_NIC_REG(0x14)
46#define NX_CDRP_CRB_OFFSET NETXEN_NIC_REG(0x18) 48#define NX_CDRP_CRB_OFFSET NETXEN_NIC_REG(0x18)
47#define NX_ARG1_CRB_OFFSET NETXEN_NIC_REG(0x1c) 49#define NX_ARG1_CRB_OFFSET NETXEN_NIC_REG(0x1c)
48#define NX_ARG2_CRB_OFFSET NETXEN_NIC_REG(0x20) 50#define NX_ARG2_CRB_OFFSET NETXEN_NIC_REG(0x20)
49#define NX_ARG3_CRB_OFFSET NETXEN_NIC_REG(0x24) 51#define NX_ARG3_CRB_OFFSET NETXEN_NIC_REG(0x24)
50#define NX_SIGN_CRB_OFFSET NETXEN_NIC_REG(0x28) 52#define NX_SIGN_CRB_OFFSET NETXEN_NIC_REG(0x28)
51#define CRB_CMD_INTR_LOOP NETXEN_NIC_REG(0x20) /* 4 regs for perf */ 53#define CRB_CMD_INTR_LOOP NETXEN_NIC_REG(0x20)
52#define CRB_CMD_DMA_LOOP NETXEN_NIC_REG(0x24) 54#define CRB_CMD_DMA_LOOP NETXEN_NIC_REG(0x24)
53#define CRB_RCV_INTR_LOOP NETXEN_NIC_REG(0x28) 55#define CRB_RCV_INTR_LOOP NETXEN_NIC_REG(0x28)
54#define CRB_RCV_DMA_LOOP NETXEN_NIC_REG(0x2c) 56#define CRB_RCV_DMA_LOOP NETXEN_NIC_REG(0x2c)
55#define CRB_ENABLE_TX_INTR NETXEN_NIC_REG(0x30) /* phantom init status */ 57#define CRB_ENABLE_TX_INTR NETXEN_NIC_REG(0x30)
56#define CRB_MMAP_ADDR_3 NETXEN_NIC_REG(0x34) 58#define CRB_MMAP_ADDR_3 NETXEN_NIC_REG(0x34)
57#define CRB_CMDPEG_CMDRING NETXEN_NIC_REG(0x38) 59#define CRB_CMDPEG_CMDRING NETXEN_NIC_REG(0x38)
58#define CRB_HOST_DUMMY_BUF_ADDR_HI NETXEN_NIC_REG(0x3c) 60#define CRB_HOST_DUMMY_BUF_ADDR_HI NETXEN_NIC_REG(0x3c)
@@ -65,7 +67,7 @@
65#define CRB_MMAP_SIZE_1 NETXEN_NIC_REG(0x58) 67#define CRB_MMAP_SIZE_1 NETXEN_NIC_REG(0x58)
66#define CRB_MMAP_SIZE_2 NETXEN_NIC_REG(0x5c) 68#define CRB_MMAP_SIZE_2 NETXEN_NIC_REG(0x5c)
67#define CRB_MMAP_SIZE_3 NETXEN_NIC_REG(0x60) 69#define CRB_MMAP_SIZE_3 NETXEN_NIC_REG(0x60)
68#define CRB_GLOBAL_INT_COAL NETXEN_NIC_REG(0x64) /* interrupt coalescing */ 70#define CRB_GLOBAL_INT_COAL NETXEN_NIC_REG(0x64)
69#define CRB_INT_COAL_MODE NETXEN_NIC_REG(0x68) 71#define CRB_INT_COAL_MODE NETXEN_NIC_REG(0x68)
70#define CRB_MAX_RCV_BUFS NETXEN_NIC_REG(0x6c) 72#define CRB_MAX_RCV_BUFS NETXEN_NIC_REG(0x6c)
71#define CRB_TX_INT_THRESHOLD NETXEN_NIC_REG(0x70) 73#define CRB_TX_INT_THRESHOLD NETXEN_NIC_REG(0x70)
@@ -83,13 +85,13 @@
83#define CRB_AGENT_TX_TYPE NETXEN_NIC_REG(0xa0) 85#define CRB_AGENT_TX_TYPE NETXEN_NIC_REG(0xa0)
84#define CRB_AGENT_TX_ADDR NETXEN_NIC_REG(0xa4) 86#define CRB_AGENT_TX_ADDR NETXEN_NIC_REG(0xa4)
85#define CRB_AGENT_TX_MSS NETXEN_NIC_REG(0xa8) 87#define CRB_AGENT_TX_MSS NETXEN_NIC_REG(0xa8)
86#define CRB_TX_STATE NETXEN_NIC_REG(0xac) /* Debug -performance */ 88#define CRB_TX_STATE NETXEN_NIC_REG(0xac)
87#define CRB_TX_COUNT NETXEN_NIC_REG(0xb0) 89#define CRB_TX_COUNT NETXEN_NIC_REG(0xb0)
88#define CRB_RX_STATE NETXEN_NIC_REG(0xb4) 90#define CRB_RX_STATE NETXEN_NIC_REG(0xb4)
89#define CRB_RX_PERF_DEBUG_1 NETXEN_NIC_REG(0xb8) 91#define CRB_RX_PERF_DEBUG_1 NETXEN_NIC_REG(0xb8)
90#define CRB_RX_LRO_CONTROL NETXEN_NIC_REG(0xbc) /* LRO On/OFF */ 92#define CRB_RX_LRO_CONTROL NETXEN_NIC_REG(0xbc)
91#define CRB_RX_LRO_START_NUM NETXEN_NIC_REG(0xc0) 93#define CRB_RX_LRO_START_NUM NETXEN_NIC_REG(0xc0)
92#define CRB_MPORT_MODE NETXEN_NIC_REG(0xc4) /* Multiport Mode */ 94#define CRB_MPORT_MODE NETXEN_NIC_REG(0xc4)
93#define CRB_CMD_RING_SIZE NETXEN_NIC_REG(0xc8) 95#define CRB_CMD_RING_SIZE NETXEN_NIC_REG(0xc8)
94#define CRB_DMA_SHIFT NETXEN_NIC_REG(0xcc) 96#define CRB_DMA_SHIFT NETXEN_NIC_REG(0xcc)
95#define CRB_INT_VECTOR NETXEN_NIC_REG(0xd4) 97#define CRB_INT_VECTOR NETXEN_NIC_REG(0xd4)
@@ -109,8 +111,6 @@
109#define CRB_CMD_CONSUMER_OFFSET_1 NETXEN_NIC_REG(0x1b0) 111#define CRB_CMD_CONSUMER_OFFSET_1 NETXEN_NIC_REG(0x1b0)
110#define CRB_CMD_PRODUCER_OFFSET_2 NETXEN_NIC_REG(0x1b8) 112#define CRB_CMD_PRODUCER_OFFSET_2 NETXEN_NIC_REG(0x1b8)
111#define CRB_CMD_CONSUMER_OFFSET_2 NETXEN_NIC_REG(0x1bc) 113#define CRB_CMD_CONSUMER_OFFSET_2 NETXEN_NIC_REG(0x1bc)
112
113// 1c0 to 1cc used for signature reg
114#define CRB_CMD_PRODUCER_OFFSET_3 NETXEN_NIC_REG(0x1d0) 114#define CRB_CMD_PRODUCER_OFFSET_3 NETXEN_NIC_REG(0x1d0)
115#define CRB_CMD_CONSUMER_OFFSET_3 NETXEN_NIC_REG(0x1d4) 115#define CRB_CMD_CONSUMER_OFFSET_3 NETXEN_NIC_REG(0x1d4)
116#define CRB_TEMP_STATE NETXEN_NIC_REG(0x1b4) 116#define CRB_TEMP_STATE NETXEN_NIC_REG(0x1b4)
@@ -120,8 +120,7 @@
120#define CRB_V2P_2 NETXEN_NIC_REG(0x298) 120#define CRB_V2P_2 NETXEN_NIC_REG(0x298)
121#define CRB_V2P_3 NETXEN_NIC_REG(0x29c) 121#define CRB_V2P_3 NETXEN_NIC_REG(0x29c)
122#define CRB_V2P(port) (CRB_V2P_0+((port)*4)) 122#define CRB_V2P(port) (CRB_V2P_0+((port)*4))
123#define CRB_DRIVER_VERSION NETXEN_NIC_REG(0x2a0) 123#define CRB_DRIVER_VERSION NETXEN_NIC_REG(0x2a0)
124/* sw int status/mask registers */
125#define CRB_SW_INT_MASK_0 NETXEN_NIC_REG(0x1d8) 124#define CRB_SW_INT_MASK_0 NETXEN_NIC_REG(0x1d8)
126#define CRB_SW_INT_MASK_1 NETXEN_NIC_REG(0x1e0) 125#define CRB_SW_INT_MASK_1 NETXEN_NIC_REG(0x1e0)
127#define CRB_SW_INT_MASK_2 NETXEN_NIC_REG(0x1e4) 126#define CRB_SW_INT_MASK_2 NETXEN_NIC_REG(0x1e4)
@@ -136,7 +135,8 @@
136#define CRB_NIC_CAPABILITIES_HOST NETXEN_NIC_REG(0x1a8) 135#define CRB_NIC_CAPABILITIES_HOST NETXEN_NIC_REG(0x1a8)
137#define CRB_NIC_CAPABILITIES_FW NETXEN_NIC_REG(0x1dc) 136#define CRB_NIC_CAPABILITIES_FW NETXEN_NIC_REG(0x1dc)
138#define CRB_NIC_MSI_MODE_HOST NETXEN_NIC_REG(0x270) 137#define CRB_NIC_MSI_MODE_HOST NETXEN_NIC_REG(0x270)
139#define CRB_NIC_MSI_MODE_FW NETXEN_NIC_REG(0x274) 138#define CRB_NIC_MSI_MODE_FW NETXEN_NIC_REG(0x274)
139#define CRB_FW_CAPABILITIES_1 NETXEN_NIC_REG(0x128)
140 140
141#define INTR_SCHEME_PERPORT 0x1 141#define INTR_SCHEME_PERPORT 0x1
142#define MSI_MODE_MULTIFUNC 0x1 142#define MSI_MODE_MULTIFUNC 0x1
@@ -162,7 +162,8 @@
162 162
163struct netxen_recv_crb { 163struct netxen_recv_crb {
164 u32 crb_rcv_producer[NUM_RCV_DESC_RINGS]; 164 u32 crb_rcv_producer[NUM_RCV_DESC_RINGS];
165 u32 crb_sts_consumer; 165 u32 crb_sts_consumer[NUM_STS_DESC_RINGS];
166 u32 sw_int_mask[NUM_STS_DESC_RINGS];
166}; 167};
167 168
168/* 169/*
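The header changes above add a second CRB window (NIC_CRB_BASE_2 and NETXEN_NIC_REG_2) and turn crb_sts_consumer into a per-status-ring array next to the new sw_int_mask array. As a rough illustration of the addressing pattern only, each register is a fixed base plus a small offset and per-ring registers are selected by index; the base constant, ring count and EX_* names below are invented for the sketch.

#include <stdint.h>

#define EX_CAM_RAM_BASE   0x41000000UL          /* invented base address */
#define EX_CAM_RAM(x)     (EX_CAM_RAM_BASE + (x))
#define EX_CRB_BASE       EX_CAM_RAM(0x200)
#define EX_NIC_REG(x)     (EX_CRB_BASE + (x))
#define EX_CRB_BASE_2     EX_CAM_RAM(0x700)     /* second window, as added above */
#define EX_NIC_REG_2(x)   (EX_CRB_BASE_2 + (x))

#define EX_NUM_STS_RINGS  4                     /* assumed ring count */

struct ex_recv_crb {
	uint32_t crb_sts_consumer[EX_NUM_STS_RINGS];
	uint32_t sw_int_mask[EX_NUM_STS_RINGS];
};

/* A driver would then address ring n's status consumer as
 * recv_crb->crb_sts_consumer[n] instead of one shared register. */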
diff --git a/drivers/net/pasemi_mac.c b/drivers/net/pasemi_mac.c
index 5eeb5a87b738..067caba43656 100644
--- a/drivers/net/pasemi_mac.c
+++ b/drivers/net/pasemi_mac.c
@@ -1735,6 +1735,19 @@ out:
1735 return ret; 1735 return ret;
1736} 1736}
1737 1737
1738static const struct net_device_ops pasemi_netdev_ops = {
1739 .ndo_open = pasemi_mac_open,
1740 .ndo_stop = pasemi_mac_close,
1741 .ndo_start_xmit = pasemi_mac_start_tx,
1742 .ndo_set_multicast_list = pasemi_mac_set_rx_mode,
1743 .ndo_set_mac_address = pasemi_mac_set_mac_addr,
1744 .ndo_change_mtu = pasemi_mac_change_mtu,
1745 .ndo_validate_addr = eth_validate_addr,
1746#ifdef CONFIG_NET_POLL_CONTROLLER
1747 .ndo_poll_controller = pasemi_mac_netpoll,
1748#endif
1749};
1750
1738static int __devinit 1751static int __devinit
1739pasemi_mac_probe(struct pci_dev *pdev, const struct pci_device_id *ent) 1752pasemi_mac_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1740{ 1753{
@@ -1817,19 +1830,11 @@ pasemi_mac_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1817 goto out; 1830 goto out;
1818 } 1831 }
1819 1832
1820 dev->open = pasemi_mac_open; 1833 dev->netdev_ops = &pasemi_netdev_ops;
1821 dev->stop = pasemi_mac_close;
1822 dev->hard_start_xmit = pasemi_mac_start_tx;
1823 dev->set_multicast_list = pasemi_mac_set_rx_mode;
1824 dev->set_mac_address = pasemi_mac_set_mac_addr;
1825 dev->mtu = PE_DEF_MTU; 1834 dev->mtu = PE_DEF_MTU;
1826 /* 1500 MTU + ETH_HLEN + VLAN_HLEN + 2 64B cachelines */ 1835 /* 1500 MTU + ETH_HLEN + VLAN_HLEN + 2 64B cachelines */
1827 mac->bufsz = dev->mtu + ETH_HLEN + ETH_FCS_LEN + LOCAL_SKB_ALIGN + 128; 1836 mac->bufsz = dev->mtu + ETH_HLEN + ETH_FCS_LEN + LOCAL_SKB_ALIGN + 128;
1828#ifdef CONFIG_NET_POLL_CONTROLLER
1829 dev->poll_controller = pasemi_mac_netpoll;
1830#endif
1831 1837
1832 dev->change_mtu = pasemi_mac_change_mtu;
1833 dev->ethtool_ops = &pasemi_mac_ethtool_ops; 1838 dev->ethtool_ops = &pasemi_mac_ethtool_ops;
1834 1839
1835 if (err) 1840 if (err)
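pasemi_mac.c above, like pci-skeleton.c, rionet.c and sb1250-mac.c later in this diff, stops assigning individual hooks on struct net_device and instead attaches one const struct net_device_ops through dev->netdev_ops. The sketch below shows the generic shape of that conversion; the foo_* driver is hypothetical and only the structure mirrors these hunks.

#include <linux/netdevice.h>
#include <linux/etherdevice.h>

static int foo_open(struct net_device *dev)
{
	netif_start_queue(dev);
	return 0;
}

static int foo_stop(struct net_device *dev)
{
	netif_stop_queue(dev);
	return 0;
}

static int foo_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	dev_kfree_skb(skb);     /* placeholder: a real driver queues to hardware */
	return NETDEV_TX_OK;
}

static const struct net_device_ops foo_netdev_ops = {
	.ndo_open               = foo_open,
	.ndo_stop               = foo_stop,
	.ndo_start_xmit         = foo_start_xmit,
	.ndo_change_mtu         = eth_change_mtu,       /* generic helpers */
	.ndo_validate_addr      = eth_validate_addr,
	.ndo_set_mac_address    = eth_mac_addr,
};

static void foo_setup(struct net_device *dev)
{
	/* before: dev->open = ..., dev->stop = ..., dev->hard_start_xmit = ... */
	dev->netdev_ops = &foo_netdev_ops;
	dev->watchdog_timeo = 5 * HZ;
}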
diff --git a/drivers/net/pci-skeleton.c b/drivers/net/pci-skeleton.c
index c95fd72c3bb9..8c1f6988f398 100644
--- a/drivers/net/pci-skeleton.c
+++ b/drivers/net/pci-skeleton.c
@@ -728,6 +728,17 @@ err_out:
728 return rc; 728 return rc;
729} 729}
730 730
731static const struct net_device_ops netdrv_netdev_ops = {
732 .ndo_open = netdrv_open,
733 .ndo_stop = netdrv_close,
734 .ndo_start_xmit = netdrv_start_xmit,
735 .ndo_set_multicast_list = netdrv_set_rx_mode,
736 .ndo_do_ioctl = netdrv_ioctl,
737 .ndo_tx_timeout = netdrv_tx_timeout,
738 .ndo_change_mtu = eth_change_mtu,
739 .ndo_validate_addr = eth_validate_addr,
740 .ndo_set_mac_address = eth_mac_addr,
741};
731 742
732static int __devinit netdrv_init_one (struct pci_dev *pdev, 743static int __devinit netdrv_init_one (struct pci_dev *pdev,
733 const struct pci_device_id *ent) 744 const struct pci_device_id *ent)
@@ -769,13 +780,7 @@ static int __devinit netdrv_init_one (struct pci_dev *pdev,
769 ((u16 *) (dev->dev_addr))[i] = 780 ((u16 *) (dev->dev_addr))[i] =
770 le16_to_cpu (read_eeprom (ioaddr, i + 7, addr_len)); 781 le16_to_cpu (read_eeprom (ioaddr, i + 7, addr_len));
771 782
772 /* The Rtl8139-specific entries in the device structure. */ 783 dev->netdev_ops = &netdrv_netdev_ops;
773 dev->open = netdrv_open;
774 dev->hard_start_xmit = netdrv_start_xmit;
775 dev->stop = netdrv_close;
776 dev->set_multicast_list = netdrv_set_rx_mode;
777 dev->do_ioctl = netdrv_ioctl;
778 dev->tx_timeout = netdrv_tx_timeout;
779 dev->watchdog_timeo = TX_TIMEOUT; 784 dev->watchdog_timeo = TX_TIMEOUT;
780 785
781 dev->irq = pdev->irq; 786 dev->irq = pdev->irq;
diff --git a/drivers/net/pppol2tp.c b/drivers/net/pppol2tp.c
index 5b07dd8e5c04..5981debcde5e 100644
--- a/drivers/net/pppol2tp.c
+++ b/drivers/net/pppol2tp.c
@@ -1238,8 +1238,7 @@ static void pppol2tp_tunnel_closeall(struct pppol2tp_tunnel *tunnel)
1238 struct pppol2tp_session *session; 1238 struct pppol2tp_session *session;
1239 struct sock *sk; 1239 struct sock *sk;
1240 1240
1241 if (tunnel == NULL) 1241 BUG_ON(tunnel == NULL);
1242 BUG();
1243 1242
1244 PRINTK(tunnel->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO, 1243 PRINTK(tunnel->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
1245 "%s: closing all sessions...\n", tunnel->name); 1244 "%s: closing all sessions...\n", tunnel->name);
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
index 0b6e8c896835..c812e16b7ab4 100644
--- a/drivers/net/r8169.c
+++ b/drivers/net/r8169.c
@@ -3791,16 +3791,13 @@ static struct net_device_stats *rtl8169_get_stats(struct net_device *dev)
3791 return &dev->stats; 3791 return &dev->stats;
3792} 3792}
3793 3793
3794#ifdef CONFIG_PM 3794static void rtl8169_net_suspend(struct net_device *dev)
3795
3796static int rtl8169_suspend(struct pci_dev *pdev, pm_message_t state)
3797{ 3795{
3798 struct net_device *dev = pci_get_drvdata(pdev);
3799 struct rtl8169_private *tp = netdev_priv(dev); 3796 struct rtl8169_private *tp = netdev_priv(dev);
3800 void __iomem *ioaddr = tp->mmio_addr; 3797 void __iomem *ioaddr = tp->mmio_addr;
3801 3798
3802 if (!netif_running(dev)) 3799 if (!netif_running(dev))
3803 goto out_pci_suspend; 3800 return;
3804 3801
3805 netif_device_detach(dev); 3802 netif_device_detach(dev);
3806 netif_stop_queue(dev); 3803 netif_stop_queue(dev);
@@ -3812,24 +3809,25 @@ static int rtl8169_suspend(struct pci_dev *pdev, pm_message_t state)
3812 rtl8169_rx_missed(dev, ioaddr); 3809 rtl8169_rx_missed(dev, ioaddr);
3813 3810
3814 spin_unlock_irq(&tp->lock); 3811 spin_unlock_irq(&tp->lock);
3812}
3813
3814#ifdef CONFIG_PM
3815
3816static int rtl8169_suspend(struct device *device)
3817{
3818 struct pci_dev *pdev = to_pci_dev(device);
3819 struct net_device *dev = pci_get_drvdata(pdev);
3815 3820
3816out_pci_suspend: 3821 rtl8169_net_suspend(dev);
3817 pci_save_state(pdev);
3818 pci_enable_wake(pdev, pci_choose_state(pdev, state),
3819 (tp->features & RTL_FEATURE_WOL) ? 1 : 0);
3820 pci_set_power_state(pdev, pci_choose_state(pdev, state));
3821 3822
3822 return 0; 3823 return 0;
3823} 3824}
3824 3825
3825static int rtl8169_resume(struct pci_dev *pdev) 3826static int rtl8169_resume(struct device *device)
3826{ 3827{
3828 struct pci_dev *pdev = to_pci_dev(device);
3827 struct net_device *dev = pci_get_drvdata(pdev); 3829 struct net_device *dev = pci_get_drvdata(pdev);
3828 3830
3829 pci_set_power_state(pdev, PCI_D0);
3830 pci_restore_state(pdev);
3831 pci_enable_wake(pdev, PCI_D0, 0);
3832
3833 if (!netif_running(dev)) 3831 if (!netif_running(dev))
3834 goto out; 3832 goto out;
3835 3833
@@ -3840,23 +3838,42 @@ out:
3840 return 0; 3838 return 0;
3841} 3839}
3842 3840
3841static struct dev_pm_ops rtl8169_pm_ops = {
3842 .suspend = rtl8169_suspend,
3843 .resume = rtl8169_resume,
3844 .freeze = rtl8169_suspend,
3845 .thaw = rtl8169_resume,
3846 .poweroff = rtl8169_suspend,
3847 .restore = rtl8169_resume,
3848};
3849
3850#define RTL8169_PM_OPS (&rtl8169_pm_ops)
3851
3852#else /* !CONFIG_PM */
3853
3854#define RTL8169_PM_OPS NULL
3855
3856#endif /* !CONFIG_PM */
3857
3843static void rtl_shutdown(struct pci_dev *pdev) 3858static void rtl_shutdown(struct pci_dev *pdev)
3844{ 3859{
3845 rtl8169_suspend(pdev, PMSG_SUSPEND); 3860 struct net_device *dev = pci_get_drvdata(pdev);
3846} 3861
3862 rtl8169_net_suspend(dev);
3847 3863
3848#endif /* CONFIG_PM */ 3864 if (system_state == SYSTEM_POWER_OFF) {
3865 pci_wake_from_d3(pdev, true);
3866 pci_set_power_state(pdev, PCI_D3hot);
3867 }
3868}
3849 3869
3850static struct pci_driver rtl8169_pci_driver = { 3870static struct pci_driver rtl8169_pci_driver = {
3851 .name = MODULENAME, 3871 .name = MODULENAME,
3852 .id_table = rtl8169_pci_tbl, 3872 .id_table = rtl8169_pci_tbl,
3853 .probe = rtl8169_init_one, 3873 .probe = rtl8169_init_one,
3854 .remove = __devexit_p(rtl8169_remove_one), 3874 .remove = __devexit_p(rtl8169_remove_one),
3855#ifdef CONFIG_PM
3856 .suspend = rtl8169_suspend,
3857 .resume = rtl8169_resume,
3858 .shutdown = rtl_shutdown, 3875 .shutdown = rtl_shutdown,
3859#endif 3876 .driver.pm = RTL8169_PM_OPS,
3860}; 3877};
3861 3878
3862static int __init rtl8169_init_module(void) 3879static int __init rtl8169_init_module(void)
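The r8169 changes above pull the device quiescing into rtl8169_net_suspend(), move suspend/resume from the legacy pci_driver callbacks to a dev_pm_ops table referenced through driver.pm, and leave .shutdown to arm wake-on-LAN and drop to D3hot only on a real power-off. Below is a hedged sketch of that shape for a hypothetical foo driver; with dev_pm_ops the PCI core handles config-space save/restore, so the callbacks stay minimal.

#include <linux/pci.h>
#include <linux/netdevice.h>

static void foo_net_suspend(struct net_device *dev)
{
	if (!netif_running(dev))
		return;
	netif_device_detach(dev);
	netif_stop_queue(dev);
	/* a real driver would also quiesce its hardware here */
}

static int foo_suspend(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct net_device *dev = pci_get_drvdata(pdev);

	foo_net_suspend(dev);
	return 0;
}

static int foo_resume(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct net_device *dev = pci_get_drvdata(pdev);

	if (netif_running(dev)) {
		netif_device_attach(dev);
		netif_wake_queue(dev);
	}
	return 0;
}

static struct dev_pm_ops foo_pm_ops = {
	.suspend        = foo_suspend,
	.resume         = foo_resume,
	.freeze         = foo_suspend,
	.thaw           = foo_resume,
	.poweroff       = foo_suspend,
	.restore        = foo_resume,
};

/* In the pci_driver the legacy .suspend/.resume fields go away and the table
 * is hooked up as .driver.pm = &foo_pm_ops; .shutdown can still call
 * pci_wake_from_d3(pdev, true) and pci_set_power_state(pdev, PCI_D3hot)
 * when system_state == SYSTEM_POWER_OFF, as the hunk above does. */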
diff --git a/drivers/net/rionet.c b/drivers/net/rionet.c
index ec59e29807a6..8702e7acdee6 100644
--- a/drivers/net/rionet.c
+++ b/drivers/net/rionet.c
@@ -428,6 +428,15 @@ static const struct ethtool_ops rionet_ethtool_ops = {
428 .get_link = ethtool_op_get_link, 428 .get_link = ethtool_op_get_link,
429}; 429};
430 430
431static const struct net_device_ops rionet_netdev_ops = {
432 .ndo_open = rionet_open,
433 .ndo_stop = rionet_close,
434 .ndo_start_xmit = rionet_start_xmit,
435 .ndo_change_mtu = eth_change_mtu,
436 .ndo_validate_addr = eth_validate_addr,
437 .ndo_set_mac_address = eth_mac_addr,
438};
439
431static int rionet_setup_netdev(struct rio_mport *mport) 440static int rionet_setup_netdev(struct rio_mport *mport)
432{ 441{
433 int rc = 0; 442 int rc = 0;
@@ -466,10 +475,7 @@ static int rionet_setup_netdev(struct rio_mport *mport)
466 ndev->dev_addr[4] = device_id >> 8; 475 ndev->dev_addr[4] = device_id >> 8;
467 ndev->dev_addr[5] = device_id & 0xff; 476 ndev->dev_addr[5] = device_id & 0xff;
468 477
469 /* Fill in the driver function table */ 478 ndev->netdev_ops = &rionet_netdev_ops;
470 ndev->open = &rionet_open;
471 ndev->hard_start_xmit = &rionet_start_xmit;
472 ndev->stop = &rionet_close;
473 ndev->mtu = RIO_MAX_MSG_SIZE - 14; 479 ndev->mtu = RIO_MAX_MSG_SIZE - 14;
474 ndev->features = NETIF_F_LLTX; 480 ndev->features = NETIF_F_LLTX;
475 SET_ETHTOOL_OPS(ndev, &rionet_ethtool_ops); 481 SET_ETHTOOL_OPS(ndev, &rionet_ethtool_ops);
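rionet is one of several drivers in this series (sb1250-mac, sgiseeq, smc911x, sun3lance and pc300 below follow the same recipe) converted from assigning individual net_device function pointers to installing a single const struct net_device_ops. A bare-bones sketch of the pattern, with foo_* as stand-in handlers:

#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>

static int foo_open(struct net_device *dev)  { return 0; }
static int foo_close(struct net_device *dev) { return 0; }

static int foo_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
        dev_kfree_skb(skb);             /* placeholder transmit path */
        return NETDEV_TX_OK;
}

static const struct net_device_ops foo_netdev_ops = {
        .ndo_open               = foo_open,
        .ndo_stop               = foo_close,
        .ndo_start_xmit         = foo_start_xmit,
        .ndo_change_mtu         = eth_change_mtu,
        .ndo_validate_addr      = eth_validate_addr,
        .ndo_set_mac_address    = eth_mac_addr,
};

static void foo_setup(struct net_device *dev)
{
        /* one assignment replaces dev->open, dev->stop,
         * dev->hard_start_xmit, dev->set_multicast_list, ... */
        dev->netdev_ops = &foo_netdev_ops;
}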
diff --git a/drivers/net/sb1250-mac.c b/drivers/net/sb1250-mac.c
index ce7551e17ba7..aaeebf57a9a4 100644
--- a/drivers/net/sb1250-mac.c
+++ b/drivers/net/sb1250-mac.c
@@ -2271,6 +2271,21 @@ static int sb1250_change_mtu(struct net_device *_dev, int new_mtu)
2271 return 0; 2271 return 0;
2272} 2272}
2273 2273
2274static const struct net_device_ops sbmac_netdev_ops = {
2275 .ndo_open = sbmac_open,
2276 .ndo_stop = sbmac_close,
2277 .ndo_start_xmit = sbmac_start_tx,
2278 .ndo_set_multicast_list = sbmac_set_rx_mode,
2279 .ndo_tx_timeout = sbmac_tx_timeout,
2280 .ndo_do_ioctl = sbmac_mii_ioctl,
2281 .ndo_change_mtu = sb1250_change_mtu,
2282 .ndo_validate_addr = eth_validate_addr,
2283 .ndo_set_mac_address = eth_mac_addr,
2284#ifdef CONFIG_NET_POLL_CONTROLLER
2285 .ndo_poll_controller = sbmac_netpoll,
2286#endif
2287};
2288
2274/********************************************************************** 2289/**********************************************************************
2275 * SBMAC_INIT(dev) 2290 * SBMAC_INIT(dev)
2276 * 2291 *
@@ -2327,21 +2342,11 @@ static int sbmac_init(struct platform_device *pldev, long long base)
2327 2342
2328 spin_lock_init(&(sc->sbm_lock)); 2343 spin_lock_init(&(sc->sbm_lock));
2329 2344
2330 dev->open = sbmac_open; 2345 dev->netdev_ops = &sbmac_netdev_ops;
2331 dev->hard_start_xmit = sbmac_start_tx; 2346 dev->watchdog_timeo = TX_TIMEOUT;
2332 dev->stop = sbmac_close;
2333 dev->set_multicast_list = sbmac_set_rx_mode;
2334 dev->do_ioctl = sbmac_mii_ioctl;
2335 dev->tx_timeout = sbmac_tx_timeout;
2336 dev->watchdog_timeo = TX_TIMEOUT;
2337 2347
2338 netif_napi_add(dev, &sc->napi, sbmac_poll, 16); 2348 netif_napi_add(dev, &sc->napi, sbmac_poll, 16);
2339 2349
2340 dev->change_mtu = sb1250_change_mtu;
2341#ifdef CONFIG_NET_POLL_CONTROLLER
2342 dev->poll_controller = sbmac_netpoll;
2343#endif
2344
2345 dev->irq = UNIT_INT(idx); 2350 dev->irq = UNIT_INT(idx);
2346 2351
2347 /* This is needed for PASS2 for Rx H/W checksum feature */ 2352 /* This is needed for PASS2 for Rx H/W checksum feature */
diff --git a/drivers/net/sfc/boards.c b/drivers/net/sfc/boards.c
index 5182ac5a1034..4a4c74c891b7 100644
--- a/drivers/net/sfc/boards.c
+++ b/drivers/net/sfc/boards.c
@@ -172,7 +172,6 @@ static const u8 sfe4002_lm87_regs[] = {
172static struct i2c_board_info sfe4002_hwmon_info = { 172static struct i2c_board_info sfe4002_hwmon_info = {
173 I2C_BOARD_INFO("lm87", 0x2e), 173 I2C_BOARD_INFO("lm87", 0x2e),
174 .platform_data = &sfe4002_lm87_channel, 174 .platform_data = &sfe4002_lm87_channel,
175 .irq = -1,
176}; 175};
177 176
178/****************************************************************************/ 177/****************************************************************************/
@@ -247,7 +246,6 @@ static const u8 sfn4112f_lm87_regs[] = {
247static struct i2c_board_info sfn4112f_hwmon_info = { 246static struct i2c_board_info sfn4112f_hwmon_info = {
248 I2C_BOARD_INFO("lm87", 0x2e), 247 I2C_BOARD_INFO("lm87", 0x2e),
249 .platform_data = &sfn4112f_lm87_channel, 248 .platform_data = &sfn4112f_lm87_channel,
250 .irq = -1,
251}; 249};
252 250
253#define SFN4112F_ACT_LED 0 251#define SFN4112F_ACT_LED 0
diff --git a/drivers/net/sfc/rx.c b/drivers/net/sfc/rx.c
index 66d7fe3db3e6..01f9432c31ef 100644
--- a/drivers/net/sfc/rx.c
+++ b/drivers/net/sfc/rx.c
@@ -450,17 +450,27 @@ static void efx_rx_packet_lro(struct efx_channel *channel,
450 450
451 /* Pass the skb/page into the LRO engine */ 451 /* Pass the skb/page into the LRO engine */
452 if (rx_buf->page) { 452 if (rx_buf->page) {
453 struct napi_gro_fraginfo info; 453 struct sk_buff *skb = napi_get_frags(napi);
454 454
455 info.frags[0].page = rx_buf->page; 455 if (!skb) {
456 info.frags[0].page_offset = efx_rx_buf_offset(rx_buf); 456 put_page(rx_buf->page);
457 info.frags[0].size = rx_buf->len; 457 goto out;
458 info.nr_frags = 1; 458 }
459 info.ip_summed = CHECKSUM_UNNECESSARY; 459
460 info.len = rx_buf->len; 460 skb_shinfo(skb)->frags[0].page = rx_buf->page;
461 skb_shinfo(skb)->frags[0].page_offset =
462 efx_rx_buf_offset(rx_buf);
463 skb_shinfo(skb)->frags[0].size = rx_buf->len;
464 skb_shinfo(skb)->nr_frags = 1;
465
466 skb->len = rx_buf->len;
467 skb->data_len = rx_buf->len;
468 skb->truesize += rx_buf->len;
469 skb->ip_summed = CHECKSUM_UNNECESSARY;
461 470
462 napi_gro_frags(napi, &info); 471 napi_gro_frags(napi);
463 472
473out:
464 EFX_BUG_ON_PARANOID(rx_buf->skb); 474 EFX_BUG_ON_PARANOID(rx_buf->skb);
465 rx_buf->page = NULL; 475 rx_buf->page = NULL;
466 } else { 476 } else {
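The sfc/rx.c hunk switches from the napi_gro_fraginfo interface to the napi_get_frags()/napi_gro_frags() pair: the driver borrows an skb from the NAPI context, attaches its receive page as fragment 0, does the length and truesize accounting itself, and hands the skb back to GRO. A condensed sketch of that sequence (foo_rx_page_to_gro() and its page/offset/len parameters stand for whatever the driver's RX descriptor provides):

#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/mm.h>

static void foo_rx_page_to_gro(struct napi_struct *napi, struct page *page,
                               unsigned int offset, unsigned int len)
{
        struct sk_buff *skb = napi_get_frags(napi);

        if (!skb) {
                put_page(page);         /* no skb available: drop buffer */
                return;
        }

        skb_shinfo(skb)->frags[0].page        = page;
        skb_shinfo(skb)->frags[0].page_offset = offset;
        skb_shinfo(skb)->frags[0].size        = len;
        skb_shinfo(skb)->nr_frags = 1;

        skb->len       = len;
        skb->data_len  = len;
        skb->truesize += len;
        skb->ip_summed = CHECKSUM_UNNECESSARY;

        napi_gro_frags(napi);
}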
diff --git a/drivers/net/sfc/sfe4001.c b/drivers/net/sfc/sfe4001.c
index 4eac5da81e5a..cee00ad49b57 100644
--- a/drivers/net/sfc/sfe4001.c
+++ b/drivers/net/sfc/sfe4001.c
@@ -296,7 +296,6 @@ static int sfe4001_check_hw(struct efx_nic *efx)
296 296
297static struct i2c_board_info sfe4001_hwmon_info = { 297static struct i2c_board_info sfe4001_hwmon_info = {
298 I2C_BOARD_INFO("max6647", 0x4e), 298 I2C_BOARD_INFO("max6647", 0x4e),
299 .irq = -1,
300}; 299};
301 300
302/* This board uses an I2C expander to provider power to the PHY, which needs to 301/* This board uses an I2C expander to provider power to the PHY, which needs to
@@ -389,12 +388,10 @@ static void sfn4111t_fini(struct efx_nic *efx)
389 388
390static struct i2c_board_info sfn4111t_a0_hwmon_info = { 389static struct i2c_board_info sfn4111t_a0_hwmon_info = {
391 I2C_BOARD_INFO("max6647", 0x4e), 390 I2C_BOARD_INFO("max6647", 0x4e),
392 .irq = -1,
393}; 391};
394 392
395static struct i2c_board_info sfn4111t_r5_hwmon_info = { 393static struct i2c_board_info sfn4111t_r5_hwmon_info = {
396 I2C_BOARD_INFO("max6646", 0x4d), 394 I2C_BOARD_INFO("max6646", 0x4d),
397 .irq = -1,
398}; 395};
399 396
400int sfn4111t_init(struct efx_nic *efx) 397int sfn4111t_init(struct efx_nic *efx)
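The sfc/boards.c and sfe4001.c hunks only drop the explicit .irq = -1 initializers from the i2c_board_info declarations; leaving the field at its zero default is the usual way of saying the sensor has no interrupt line wired up. Illustrative declaration (the lm87 address matches the boards above):

#include <linux/i2c.h>

static struct i2c_board_info foo_hwmon_info = {
        I2C_BOARD_INFO("lm87", 0x2e),
        /* .irq left at 0: no interrupt line */
};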
diff --git a/drivers/net/sgiseeq.c b/drivers/net/sgiseeq.c
index 97d68560067d..5fb88ca6dd7f 100644
--- a/drivers/net/sgiseeq.c
+++ b/drivers/net/sgiseeq.c
@@ -709,6 +709,17 @@ static inline void setup_rx_ring(struct net_device *dev,
709 dma_sync_desc_dev(dev, &buf[i]); 709 dma_sync_desc_dev(dev, &buf[i]);
710} 710}
711 711
712static const struct net_device_ops sgiseeq_netdev_ops = {
713 .ndo_open = sgiseeq_open,
714 .ndo_stop = sgiseeq_close,
715 .ndo_start_xmit = sgiseeq_start_xmit,
716 .ndo_tx_timeout = timeout,
717 .ndo_set_multicast_list = sgiseeq_set_multicast,
718 .ndo_set_mac_address = sgiseeq_set_mac_address,
719 .ndo_change_mtu = eth_change_mtu,
720 .ndo_validate_addr = eth_validate_addr,
721};
722
712static int __init sgiseeq_probe(struct platform_device *pdev) 723static int __init sgiseeq_probe(struct platform_device *pdev)
713{ 724{
714 struct sgiseeq_platform_data *pd = pdev->dev.platform_data; 725 struct sgiseeq_platform_data *pd = pdev->dev.platform_data;
@@ -775,13 +786,8 @@ static int __init sgiseeq_probe(struct platform_device *pdev)
775 SEEQ_CTRL_SFLAG | SEEQ_CTRL_ESHORT | 786 SEEQ_CTRL_SFLAG | SEEQ_CTRL_ESHORT |
776 SEEQ_CTRL_ENCARR; 787 SEEQ_CTRL_ENCARR;
777 788
778 dev->open = sgiseeq_open; 789 dev->netdev_ops = &sgiseeq_netdev_ops;
779 dev->stop = sgiseeq_close;
780 dev->hard_start_xmit = sgiseeq_start_xmit;
781 dev->tx_timeout = timeout;
782 dev->watchdog_timeo = (200 * HZ) / 1000; 790 dev->watchdog_timeo = (200 * HZ) / 1000;
783 dev->set_multicast_list = sgiseeq_set_multicast;
784 dev->set_mac_address = sgiseeq_set_mac_address;
785 dev->irq = irq; 791 dev->irq = irq;
786 792
787 if (register_netdev(dev)) { 793 if (register_netdev(dev)) {
diff --git a/drivers/net/smc911x.c b/drivers/net/smc911x.c
index 293610334a77..bc4976ac8712 100644
--- a/drivers/net/smc911x.c
+++ b/drivers/net/smc911x.c
@@ -1774,6 +1774,20 @@ static int __devinit smc911x_findirq(struct net_device *dev)
1774 return probe_irq_off(cookie); 1774 return probe_irq_off(cookie);
1775} 1775}
1776 1776
1777static const struct net_device_ops smc911x_netdev_ops = {
1778 .ndo_open = smc911x_open,
1779 .ndo_stop = smc911x_close,
1780 .ndo_start_xmit = smc911x_hard_start_xmit,
1781 .ndo_tx_timeout = smc911x_timeout,
1782 .ndo_set_multicast_list = smc911x_set_multicast_list,
1783 .ndo_change_mtu = eth_change_mtu,
1784 .ndo_validate_addr = eth_validate_addr,
1785 .ndo_set_mac_address = eth_mac_addr,
1786#ifdef CONFIG_NET_POLL_CONTROLLER
1787 .ndo_poll_controller = smc911x_poll_controller,
1788#endif
1789};
1790
1777/* 1791/*
1778 * Function: smc911x_probe(unsigned long ioaddr) 1792 * Function: smc911x_probe(unsigned long ioaddr)
1779 * 1793 *
@@ -1940,16 +1954,9 @@ static int __devinit smc911x_probe(struct net_device *dev)
1940 /* Fill in the fields of the device structure with ethernet values. */ 1954 /* Fill in the fields of the device structure with ethernet values. */
1941 ether_setup(dev); 1955 ether_setup(dev);
1942 1956
1943 dev->open = smc911x_open; 1957 dev->netdev_ops = &smc911x_netdev_ops;
1944 dev->stop = smc911x_close;
1945 dev->hard_start_xmit = smc911x_hard_start_xmit;
1946 dev->tx_timeout = smc911x_timeout;
1947 dev->watchdog_timeo = msecs_to_jiffies(watchdog); 1958 dev->watchdog_timeo = msecs_to_jiffies(watchdog);
1948 dev->set_multicast_list = smc911x_set_multicast_list;
1949 dev->ethtool_ops = &smc911x_ethtool_ops; 1959 dev->ethtool_ops = &smc911x_ethtool_ops;
1950#ifdef CONFIG_NET_POLL_CONTROLLER
1951 dev->poll_controller = smc911x_poll_controller;
1952#endif
1953 1960
1954 INIT_WORK(&lp->phy_configure, smc911x_phy_configure); 1961 INIT_WORK(&lp->phy_configure, smc911x_phy_configure);
1955 lp->mii.phy_id_mask = 0x1f; 1962 lp->mii.phy_id_mask = 0x1f;
diff --git a/drivers/net/sun3lance.c b/drivers/net/sun3lance.c
index e5beb299cbd0..9bd9dadb8534 100644
--- a/drivers/net/sun3lance.c
+++ b/drivers/net/sun3lance.c
@@ -294,6 +294,16 @@ out:
294 return ERR_PTR(err); 294 return ERR_PTR(err);
295} 295}
296 296
297static const struct net_device_ops lance_netdev_ops = {
298 .ndo_open = lance_open,
299 .ndo_stop = lance_close,
300 .ndo_start_xmit = lance_start_xmit,
301 .ndo_set_multicast_list = set_multicast_list,
302 .ndo_set_mac_address = NULL,
303 .ndo_change_mtu = eth_change_mtu,
304 .ndo_validate_addr = eth_validate_addr,
305};
306
297static int __init lance_probe( struct net_device *dev) 307static int __init lance_probe( struct net_device *dev)
298{ 308{
299 unsigned long ioaddr; 309 unsigned long ioaddr;
@@ -397,12 +407,7 @@ static int __init lance_probe( struct net_device *dev)
397 if (did_version++ == 0) 407 if (did_version++ == 0)
398 printk( version ); 408 printk( version );
399 409
400 /* The LANCE-specific entries in the device structure. */ 410 dev->netdev_ops = &lance_netdev_ops;
401 dev->open = &lance_open;
402 dev->hard_start_xmit = &lance_start_xmit;
403 dev->stop = &lance_close;
404 dev->set_multicast_list = &set_multicast_list;
405 dev->set_mac_address = NULL;
406// KLUDGE -- REMOVE ME 411// KLUDGE -- REMOVE ME
407 set_bit(__LINK_STATE_PRESENT, &dev->state); 412 set_bit(__LINK_STATE_PRESENT, &dev->state);
408 413
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index 201be425643a..eb65e25989f3 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -68,8 +68,8 @@
68 68
69#define DRV_MODULE_NAME "tg3" 69#define DRV_MODULE_NAME "tg3"
70#define PFX DRV_MODULE_NAME ": " 70#define PFX DRV_MODULE_NAME ": "
71#define DRV_MODULE_VERSION "3.98" 71#define DRV_MODULE_VERSION "3.99"
72#define DRV_MODULE_RELDATE "February 25, 2009" 72#define DRV_MODULE_RELDATE "April 20, 2009"
73 73
74#define TG3_DEF_MAC_MODE 0 74#define TG3_DEF_MAC_MODE 0
75#define TG3_DEF_RX_MODE 0 75#define TG3_DEF_RX_MODE 0
@@ -1950,7 +1950,8 @@ static void tg3_frob_aux_power(struct tg3 *tp)
1950 GRC_LCLCTRL_GPIO_OUTPUT0 | 1950 GRC_LCLCTRL_GPIO_OUTPUT0 |
1951 GRC_LCLCTRL_GPIO_OUTPUT1), 1951 GRC_LCLCTRL_GPIO_OUTPUT1),
1952 100); 1952 100);
1953 } else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761) { 1953 } else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
1954 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
1954 /* The 5761 non-e device swaps GPIO 0 and GPIO 2. */ 1955 /* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
1955 u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 | 1956 u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
1956 GRC_LCLCTRL_GPIO_OE1 | 1957 GRC_LCLCTRL_GPIO_OE1 |
@@ -2455,8 +2456,6 @@ static int tg3_set_power_state(struct tg3 *tp, pci_power_t state)
2455 } 2456 }
2456 } 2457 }
2457 2458
2458 __tg3_set_mac_addr(tp, 0);
2459
2460 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) { 2459 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2461 u32 val; 2460 u32 val;
2462 2461
@@ -4656,6 +4655,7 @@ static int tg3_poll(struct napi_struct *napi, int budget)
4656 * so we must read it before checking for more work. 4655 * so we must read it before checking for more work.
4657 */ 4656 */
4658 tp->last_tag = sblk->status_tag; 4657 tp->last_tag = sblk->status_tag;
4658 tp->last_irq_tag = tp->last_tag;
4659 rmb(); 4659 rmb();
4660 } else 4660 } else
4661 sblk->status &= ~SD_STATUS_UPDATED; 4661 sblk->status &= ~SD_STATUS_UPDATED;
@@ -4811,7 +4811,7 @@ static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
4811 * Reading the PCI State register will confirm whether the 4811 * Reading the PCI State register will confirm whether the
4812 * interrupt is ours and will flush the status block. 4812 * interrupt is ours and will flush the status block.
4813 */ 4813 */
4814 if (unlikely(sblk->status_tag == tp->last_tag)) { 4814 if (unlikely(sblk->status_tag == tp->last_irq_tag)) {
4815 if ((tp->tg3_flags & TG3_FLAG_CHIP_RESETTING) || 4815 if ((tp->tg3_flags & TG3_FLAG_CHIP_RESETTING) ||
4816 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) { 4816 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
4817 handled = 0; 4817 handled = 0;
@@ -4831,18 +4831,22 @@ static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
4831 * excessive spurious interrupts can be worse in some cases. 4831 * excessive spurious interrupts can be worse in some cases.
4832 */ 4832 */
4833 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001); 4833 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
4834
4835 /*
4836 * In a shared interrupt configuration, sometimes other devices'
4837 * interrupts will scream. We record the current status tag here
4838 * so that the above check can report that the screaming interrupts
4839 * are unhandled. Eventually they will be silenced.
4840 */
4841 tp->last_irq_tag = sblk->status_tag;
4842
4834 if (tg3_irq_sync(tp)) 4843 if (tg3_irq_sync(tp))
4835 goto out; 4844 goto out;
4836 if (napi_schedule_prep(&tp->napi)) { 4845
4837 prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]); 4846 prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
4838 /* Update last_tag to mark that this status has been 4847
4839 * seen. Because interrupt may be shared, we may be 4848 napi_schedule(&tp->napi);
4840 * racing with tg3_poll(), so only update last_tag 4849
4841 * if tg3_poll() is not scheduled.
4842 */
4843 tp->last_tag = sblk->status_tag;
4844 __napi_schedule(&tp->napi);
4845 }
4846out: 4850out:
4847 return IRQ_RETVAL(handled); 4851 return IRQ_RETVAL(handled);
4848} 4852}
@@ -6156,6 +6160,7 @@ static int tg3_chip_reset(struct tg3 *tp)
6156 tp->hw_status->status_tag = 0; 6160 tp->hw_status->status_tag = 0;
6157 } 6161 }
6158 tp->last_tag = 0; 6162 tp->last_tag = 0;
6163 tp->last_irq_tag = 0;
6159 smp_mb(); 6164 smp_mb();
6160 synchronize_irq(tp->pdev->irq); 6165 synchronize_irq(tp->pdev->irq);
6161 6166
@@ -6350,6 +6355,8 @@ static int tg3_halt(struct tg3 *tp, int kind, int silent)
6350 tg3_abort_hw(tp, silent); 6355 tg3_abort_hw(tp, silent);
6351 err = tg3_chip_reset(tp); 6356 err = tg3_chip_reset(tp);
6352 6357
6358 __tg3_set_mac_addr(tp, 0);
6359
6353 tg3_write_sig_legacy(tp, kind); 6360 tg3_write_sig_legacy(tp, kind);
6354 tg3_write_sig_post_reset(tp, kind); 6361 tg3_write_sig_post_reset(tp, kind);
6355 6362
@@ -6711,6 +6718,13 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
6711 tw32(TG3_CPMU_HST_ACC, val); 6718 tw32(TG3_CPMU_HST_ACC, val);
6712 } 6719 }
6713 6720
6721 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
6722 val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
6723 val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
6724 PCIE_PWR_MGMT_L1_THRESH_4MS;
6725 tw32(PCIE_PWR_MGMT_THRESH, val);
6726 }
6727
6714 /* This works around an issue with Athlon chipsets on 6728 /* This works around an issue with Athlon chipsets on
6715 * B3 tigon3 silicon. This bit has no effect on any 6729 * B3 tigon3 silicon. This bit has no effect on any
6716 * other revision. But do not set this on PCI Express 6730 * other revision. But do not set this on PCI Express
@@ -7138,7 +7152,6 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
7138 udelay(100); 7152 udelay(100);
7139 7153
7140 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0); 7154 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0);
7141 tp->last_tag = 0;
7142 7155
7143 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) { 7156 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
7144 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE); 7157 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
@@ -8539,6 +8552,9 @@ static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
8539 u32 i, offset, len, b_offset, b_count; 8552 u32 i, offset, len, b_offset, b_count;
8540 __be32 val; 8553 __be32 val;
8541 8554
8555 if (tp->tg3_flags3 & TG3_FLG3_NO_NVRAM)
8556 return -EINVAL;
8557
8542 if (tp->link_config.phy_is_low_power) 8558 if (tp->link_config.phy_is_low_power)
8543 return -EAGAIN; 8559 return -EAGAIN;
8544 8560
@@ -8604,7 +8620,8 @@ static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
8604 if (tp->link_config.phy_is_low_power) 8620 if (tp->link_config.phy_is_low_power)
8605 return -EAGAIN; 8621 return -EAGAIN;
8606 8622
8607 if (eeprom->magic != TG3_EEPROM_MAGIC) 8623 if ((tp->tg3_flags3 & TG3_FLG3_NO_NVRAM) ||
8624 eeprom->magic != TG3_EEPROM_MAGIC)
8608 return -EINVAL; 8625 return -EINVAL;
8609 8626
8610 offset = eeprom->offset; 8627 offset = eeprom->offset;
@@ -9201,6 +9218,9 @@ static int tg3_test_nvram(struct tg3 *tp)
9201 __be32 *buf; 9218 __be32 *buf;
9202 int i, j, k, err = 0, size; 9219 int i, j, k, err = 0, size;
9203 9220
9221 if (tp->tg3_flags3 & TG3_FLG3_NO_NVRAM)
9222 return 0;
9223
9204 if (tg3_nvram_read(tp, 0, &magic) != 0) 9224 if (tg3_nvram_read(tp, 0, &magic) != 0)
9205 return -EIO; 9225 return -EIO;
9206 9226
@@ -10183,7 +10203,8 @@ static void __devinit tg3_get_nvram_size(struct tg3 *tp)
10183{ 10203{
10184 u32 val; 10204 u32 val;
10185 10205
10186 if (tg3_nvram_read(tp, 0, &val) != 0) 10206 if ((tp->tg3_flags3 & TG3_FLG3_NO_NVRAM) ||
10207 tg3_nvram_read(tp, 0, &val) != 0)
10187 return; 10208 return;
10188 10209
10189 /* Selfboot format */ 10210 /* Selfboot format */
@@ -10565,6 +10586,7 @@ static void __devinit tg3_get_57780_nvram_info(struct tg3 *tp)
10565 } 10586 }
10566 break; 10587 break;
10567 default: 10588 default:
10589 tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM;
10568 return; 10590 return;
10569 } 10591 }
10570 10592
@@ -11365,7 +11387,8 @@ static void __devinit tg3_read_partno(struct tg3 *tp)
11365 unsigned int i; 11387 unsigned int i;
11366 u32 magic; 11388 u32 magic;
11367 11389
11368 if (tg3_nvram_read(tp, 0x0, &magic)) 11390 if ((tp->tg3_flags3 & TG3_FLG3_NO_NVRAM) ||
11391 tg3_nvram_read(tp, 0x0, &magic))
11369 goto out_not_found; 11392 goto out_not_found;
11370 11393
11371 if (magic == TG3_EEPROM_MAGIC) { 11394 if (magic == TG3_EEPROM_MAGIC) {
@@ -11457,6 +11480,15 @@ static void __devinit tg3_read_partno(struct tg3 *tp)
11457out_not_found: 11480out_not_found:
11458 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) 11481 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
11459 strcpy(tp->board_part_number, "BCM95906"); 11482 strcpy(tp->board_part_number, "BCM95906");
11483 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 &&
11484 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
11485 strcpy(tp->board_part_number, "BCM57780");
11486 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 &&
11487 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
11488 strcpy(tp->board_part_number, "BCM57760");
11489 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 &&
11490 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
11491 strcpy(tp->board_part_number, "BCM57790");
11460 else 11492 else
11461 strcpy(tp->board_part_number, "none"); 11493 strcpy(tp->board_part_number, "none");
11462} 11494}
@@ -11667,6 +11699,14 @@ static void __devinit tg3_read_fw_ver(struct tg3 *tp)
11667{ 11699{
11668 u32 val; 11700 u32 val;
11669 11701
11702 if (tp->tg3_flags3 & TG3_FLG3_NO_NVRAM) {
11703 tp->fw_ver[0] = 's';
11704 tp->fw_ver[1] = 'b';
11705 tp->fw_ver[2] = '\0';
11706
11707 return;
11708 }
11709
11670 if (tg3_nvram_read(tp, 0, &val)) 11710 if (tg3_nvram_read(tp, 0, &val))
11671 return; 11711 return;
11672 11712
@@ -11952,7 +11992,8 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
11952 tp->tg3_flags2 &= ~TG3_FLG2_HW_TSO_2; 11992 tp->tg3_flags2 &= ~TG3_FLG2_HW_TSO_2;
11953 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 || 11993 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
11954 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 || 11994 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
11955 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) 11995 tp->pci_chip_rev_id == CHIPREV_ID_57780_A0 ||
11996 tp->pci_chip_rev_id == CHIPREV_ID_57780_A1)
11956 tp->tg3_flags3 |= TG3_FLG3_CLKREQ_BUG; 11997 tp->tg3_flags3 |= TG3_FLG3_CLKREQ_BUG;
11957 } 11998 }
11958 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) { 11999 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
@@ -12144,7 +12185,8 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
12144 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) 12185 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
12145 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL; 12186 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
12146 12187
12147 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761) { 12188 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
12189 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
12148 /* Turn off the debug UART. */ 12190 /* Turn off the debug UART. */
12149 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL; 12191 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
12150 if (tp->tg3_flags2 & TG3_FLG2_IS_NIC) 12192 if (tp->tg3_flags2 & TG3_FLG2_IS_NIC)
@@ -12454,7 +12496,8 @@ static int __devinit tg3_get_device_address(struct tg3 *tp)
12454 } 12496 }
12455 if (!addr_ok) { 12497 if (!addr_ok) {
12456 /* Next, try NVRAM. */ 12498 /* Next, try NVRAM. */
12457 if (!tg3_nvram_read_be32(tp, mac_offset + 0, &hi) && 12499 if (!(tp->tg3_flags3 & TG3_FLG3_NO_NVRAM) &&
12500 !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
12458 !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) { 12501 !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
12459 memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2); 12502 memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
12460 memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo)); 12503 memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
diff --git a/drivers/net/tg3.h b/drivers/net/tg3.h
index cb4c62abdd21..b3347c41a1a3 100644
--- a/drivers/net/tg3.h
+++ b/drivers/net/tg3.h
@@ -95,6 +95,8 @@
95#define CHIPREV_ID_5752_A1 0x6001 95#define CHIPREV_ID_5752_A1 0x6001
96#define CHIPREV_ID_5714_A2 0x9002 96#define CHIPREV_ID_5714_A2 0x9002
97#define CHIPREV_ID_5906_A1 0xc001 97#define CHIPREV_ID_5906_A1 0xc001
98#define CHIPREV_ID_57780_A0 0x57780000
99#define CHIPREV_ID_57780_A1 0x57780001
98#define GET_ASIC_REV(CHIP_REV_ID) ((CHIP_REV_ID) >> 12) 100#define GET_ASIC_REV(CHIP_REV_ID) ((CHIP_REV_ID) >> 12)
99#define ASIC_REV_5700 0x07 101#define ASIC_REV_5700 0x07
100#define ASIC_REV_5701 0x00 102#define ASIC_REV_5701 0x00
@@ -1697,6 +1699,8 @@
1697 1699
1698#define PCIE_PWR_MGMT_THRESH 0x00007d28 1700#define PCIE_PWR_MGMT_THRESH 0x00007d28
1699#define PCIE_PWR_MGMT_L1_THRESH_MSK 0x0000ff00 1701#define PCIE_PWR_MGMT_L1_THRESH_MSK 0x0000ff00
1702#define PCIE_PWR_MGMT_L1_THRESH_4MS 0x0000ff00
1703#define PCIE_PWR_MGMT_EXT_ASPM_TMR_EN 0x01000000
1700 1704
1701 1705
1702/* OTP bit definitions */ 1706/* OTP bit definitions */
@@ -2501,6 +2505,7 @@ struct tg3 {
2501 struct tg3_hw_status *hw_status; 2505 struct tg3_hw_status *hw_status;
2502 dma_addr_t status_mapping; 2506 dma_addr_t status_mapping;
2503 u32 last_tag; 2507 u32 last_tag;
2508 u32 last_irq_tag;
2504 2509
2505 u32 msg_enable; 2510 u32 msg_enable;
2506 2511
@@ -2635,6 +2640,7 @@ struct tg3 {
2635#define TG3_FLG3_CLKREQ_BUG 0x00000800 2640#define TG3_FLG3_CLKREQ_BUG 0x00000800
2636#define TG3_FLG3_PHY_ENABLE_APD 0x00001000 2641#define TG3_FLG3_PHY_ENABLE_APD 0x00001000
2637#define TG3_FLG3_5755_PLUS 0x00002000 2642#define TG3_FLG3_5755_PLUS 0x00002000
2643#define TG3_FLG3_NO_NVRAM 0x00004000
2638 2644
2639 struct timer_list timer; 2645 struct timer_list timer;
2640 u16 timer_counter; 2646 u16 timer_counter;
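Besides the 5761S/57780 device-ID handling and the TG3_FLG3_NO_NVRAM short-circuits, the tg3 update splits the status-tag bookkeeping in two: last_irq_tag is whatever the hard interrupt handler has acknowledged (used to spot a screaming shared interrupt), while last_tag is now updated only from the NAPI poll path. A heavily abbreviated sketch of that division of labour; the real handler also checks TG3PCI_PCISTATE and the chip-resetting flag before declaring an interrupt unhandled:

#include <linux/interrupt.h>
#include <linux/netdevice.h>

struct foo_hw_status { u32 status_tag; };

struct foo {
        struct napi_struct      napi;
        struct foo_hw_status    *hw_status;
        u32                     last_irq_tag;   /* tag acked in hard IRQ */
        u32                     last_tag;       /* tag seen by NAPI poll */
};

static irqreturn_t foo_interrupt_tagged(int irq, void *dev_id)
{
        struct net_device *dev = dev_id;
        struct foo *tp = netdev_priv(dev);
        struct foo_hw_status *sblk = tp->hw_status;

        if (unlikely(sblk->status_tag == tp->last_irq_tag))
                return IRQ_NONE;        /* nothing new: probably another
                                         * device on a shared line */

        /* ... ack the interrupt in hardware here ... */

        /* record what was acked so the check above stays valid even
         * before the poll loop has run; last_tag is left alone */
        tp->last_irq_tag = sblk->status_tag;

        napi_schedule(&tp->napi);
        return IRQ_HANDLED;
}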
diff --git a/drivers/net/tulip/winbond-840.c b/drivers/net/tulip/winbond-840.c
index 264e61404f34..842b1a2c40d4 100644
--- a/drivers/net/tulip/winbond-840.c
+++ b/drivers/net/tulip/winbond-840.c
@@ -1601,8 +1601,7 @@ static int w840_suspend (struct pci_dev *pdev, pm_message_t state)
1601 1601
1602 /* no more hardware accesses behind this line. */ 1602 /* no more hardware accesses behind this line. */
1603 1603
1604 BUG_ON(np->csr6); 1604 BUG_ON(np->csr6 || ioread32(ioaddr + IntrEnable));
1605 if (ioread32(ioaddr + IntrEnable)) BUG();
1606 1605
1607 /* pci_power_off(pdev, -1); */ 1606 /* pci_power_off(pdev, -1); */
1608 1607
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 735bf41c654a..589f0ca668d6 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -540,31 +540,34 @@ static inline struct sk_buff *tun_alloc_skb(struct tun_struct *tun,
540 540
541/* Get packet from user space buffer */ 541/* Get packet from user space buffer */
542static __inline__ ssize_t tun_get_user(struct tun_struct *tun, 542static __inline__ ssize_t tun_get_user(struct tun_struct *tun,
543 struct iovec *iv, size_t count, 543 const struct iovec *iv, size_t count,
544 int noblock) 544 int noblock)
545{ 545{
546 struct tun_pi pi = { 0, cpu_to_be16(ETH_P_IP) }; 546 struct tun_pi pi = { 0, cpu_to_be16(ETH_P_IP) };
547 struct sk_buff *skb; 547 struct sk_buff *skb;
548 size_t len = count, align = 0; 548 size_t len = count, align = 0;
549 struct virtio_net_hdr gso = { 0 }; 549 struct virtio_net_hdr gso = { 0 };
550 int offset = 0;
550 551
551 if (!(tun->flags & TUN_NO_PI)) { 552 if (!(tun->flags & TUN_NO_PI)) {
552 if ((len -= sizeof(pi)) > count) 553 if ((len -= sizeof(pi)) > count)
553 return -EINVAL; 554 return -EINVAL;
554 555
555 if(memcpy_fromiovec((void *)&pi, iv, sizeof(pi))) 556 if (memcpy_fromiovecend((void *)&pi, iv, 0, sizeof(pi)))
556 return -EFAULT; 557 return -EFAULT;
558 offset += sizeof(pi);
557 } 559 }
558 560
559 if (tun->flags & TUN_VNET_HDR) { 561 if (tun->flags & TUN_VNET_HDR) {
560 if ((len -= sizeof(gso)) > count) 562 if ((len -= sizeof(gso)) > count)
561 return -EINVAL; 563 return -EINVAL;
562 564
563 if (memcpy_fromiovec((void *)&gso, iv, sizeof(gso))) 565 if (memcpy_fromiovecend((void *)&gso, iv, offset, sizeof(gso)))
564 return -EFAULT; 566 return -EFAULT;
565 567
566 if (gso.hdr_len > len) 568 if (gso.hdr_len > len)
567 return -EINVAL; 569 return -EINVAL;
570 offset += sizeof(pi);
568 } 571 }
569 572
570 if ((tun->flags & TUN_TYPE_MASK) == TUN_TAP_DEV) { 573 if ((tun->flags & TUN_TYPE_MASK) == TUN_TAP_DEV) {
@@ -581,7 +584,7 @@ static __inline__ ssize_t tun_get_user(struct tun_struct *tun,
581 return PTR_ERR(skb); 584 return PTR_ERR(skb);
582 } 585 }
583 586
584 if (skb_copy_datagram_from_iovec(skb, 0, iv, len)) { 587 if (skb_copy_datagram_from_iovec(skb, 0, iv, offset, len)) {
585 tun->dev->stats.rx_dropped++; 588 tun->dev->stats.rx_dropped++;
586 kfree_skb(skb); 589 kfree_skb(skb);
587 return -EFAULT; 590 return -EFAULT;
@@ -673,7 +676,7 @@ static ssize_t tun_chr_aio_write(struct kiocb *iocb, const struct iovec *iv,
673 676
674 DBG(KERN_INFO "%s: tun_chr_write %ld\n", tun->dev->name, count); 677 DBG(KERN_INFO "%s: tun_chr_write %ld\n", tun->dev->name, count);
675 678
676 result = tun_get_user(tun, (struct iovec *)iv, iov_length(iv, count), 679 result = tun_get_user(tun, iv, iov_length(iv, count),
677 file->f_flags & O_NONBLOCK); 680 file->f_flags & O_NONBLOCK);
678 681
679 tun_put(tun); 682 tun_put(tun);
@@ -683,7 +686,7 @@ static ssize_t tun_chr_aio_write(struct kiocb *iocb, const struct iovec *iv,
683/* Put packet to the user space buffer */ 686/* Put packet to the user space buffer */
684static __inline__ ssize_t tun_put_user(struct tun_struct *tun, 687static __inline__ ssize_t tun_put_user(struct tun_struct *tun,
685 struct sk_buff *skb, 688 struct sk_buff *skb,
686 struct iovec *iv, int len) 689 const struct iovec *iv, int len)
687{ 690{
688 struct tun_pi pi = { 0, skb->protocol }; 691 struct tun_pi pi = { 0, skb->protocol };
689 ssize_t total = 0; 692 ssize_t total = 0;
@@ -697,7 +700,7 @@ static __inline__ ssize_t tun_put_user(struct tun_struct *tun,
697 pi.flags |= TUN_PKT_STRIP; 700 pi.flags |= TUN_PKT_STRIP;
698 } 701 }
699 702
700 if (memcpy_toiovec(iv, (void *) &pi, sizeof(pi))) 703 if (memcpy_toiovecend(iv, (void *) &pi, 0, sizeof(pi)))
701 return -EFAULT; 704 return -EFAULT;
702 total += sizeof(pi); 705 total += sizeof(pi);
703 } 706 }
@@ -730,14 +733,15 @@ static __inline__ ssize_t tun_put_user(struct tun_struct *tun,
730 gso.csum_offset = skb->csum_offset; 733 gso.csum_offset = skb->csum_offset;
731 } /* else everything is zero */ 734 } /* else everything is zero */
732 735
733 if (unlikely(memcpy_toiovec(iv, (void *)&gso, sizeof(gso)))) 736 if (unlikely(memcpy_toiovecend(iv, (void *)&gso, total,
737 sizeof(gso))))
734 return -EFAULT; 738 return -EFAULT;
735 total += sizeof(gso); 739 total += sizeof(gso);
736 } 740 }
737 741
738 len = min_t(int, skb->len, len); 742 len = min_t(int, skb->len, len);
739 743
740 skb_copy_datagram_iovec(skb, 0, iv, len); 744 skb_copy_datagram_const_iovec(skb, 0, iv, total, len);
741 total += len; 745 total += len;
742 746
743 tun->dev->stats.tx_packets++; 747 tun->dev->stats.tx_packets++;
@@ -792,7 +796,7 @@ static ssize_t tun_chr_aio_read(struct kiocb *iocb, const struct iovec *iv,
792 } 796 }
793 netif_wake_queue(tun->dev); 797 netif_wake_queue(tun->dev);
794 798
795 ret = tun_put_user(tun, skb, (struct iovec *) iv, len); 799 ret = tun_put_user(tun, skb, iv, len);
796 kfree_skb(skb); 800 kfree_skb(skb);
797 break; 801 break;
798 } 802 }
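The tun.c conversion is about const-correctness: memcpy_fromiovec()/memcpy_toiovec() consume the iovec as they copy, which forced the casts away from const. The *iovecend variants take an explicit offset instead, so tun can keep a running offset of its own and accept const struct iovec * end to end. A small sketch of the idiom (struct foo_hdr stands in for tun_pi/virtio_net_hdr):

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/socket.h>
#include <linux/uio.h>

struct foo_hdr {
        __u16 flags;
        __be16 proto;
};

static ssize_t foo_get_user(const struct iovec *iv, size_t count)
{
        struct foo_hdr hdr;
        int offset = 0;

        if (count < sizeof(hdr))
                return -EINVAL;

        /* copy at an explicit offset; the iovec itself is never
         * advanced, which is what allows the const qualifier */
        if (memcpy_fromiovecend((void *)&hdr, iv, offset, sizeof(hdr)))
                return -EFAULT;
        offset += sizeof(hdr);

        /* later copies (e.g. the packet payload) continue from 'offset'
         * rather than from a partially consumed iovec */
        return offset;
}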
diff --git a/drivers/net/ucc_geth.c b/drivers/net/ucc_geth.c
index 44f8392da117..811f97cb0a29 100644
--- a/drivers/net/ucc_geth.c
+++ b/drivers/net/ucc_geth.c
@@ -3217,7 +3217,7 @@ static int ucc_geth_tx(struct net_device *dev, u8 txQ)
3217 dev->stats.tx_packets++; 3217 dev->stats.tx_packets++;
3218 3218
3219 /* Free the sk buffer associated with this TxBD */ 3219 /* Free the sk buffer associated with this TxBD */
3220 dev_kfree_skb_irq(ugeth-> 3220 dev_kfree_skb(ugeth->
3221 tx_skbuff[txQ][ugeth->skb_dirtytx[txQ]]); 3221 tx_skbuff[txQ][ugeth->skb_dirtytx[txQ]]);
3222 ugeth->tx_skbuff[txQ][ugeth->skb_dirtytx[txQ]] = NULL; 3222 ugeth->tx_skbuff[txQ][ugeth->skb_dirtytx[txQ]] = NULL;
3223 ugeth->skb_dirtytx[txQ] = 3223 ugeth->skb_dirtytx[txQ] =
@@ -3251,9 +3251,15 @@ static int ucc_geth_poll(struct napi_struct *napi, int budget)
3251 for (i = 0; i < ug_info->numQueuesRx; i++) 3251 for (i = 0; i < ug_info->numQueuesRx; i++)
3252 howmany += ucc_geth_rx(ugeth, i, budget - howmany); 3252 howmany += ucc_geth_rx(ugeth, i, budget - howmany);
3253 3253
3254 /* Tx event processing */
3255 spin_lock(&ugeth->lock);
3256 for (i = 0; i < ug_info->numQueuesTx; i++)
3257 ucc_geth_tx(ugeth->ndev, i);
3258 spin_unlock(&ugeth->lock);
3259
3254 if (howmany < budget) { 3260 if (howmany < budget) {
3255 napi_complete(napi); 3261 napi_complete(napi);
3256 setbits32(ugeth->uccf->p_uccm, UCCE_RX_EVENTS); 3262 setbits32(ugeth->uccf->p_uccm, UCCE_RX_EVENTS | UCCE_TX_EVENTS);
3257 } 3263 }
3258 3264
3259 return howmany; 3265 return howmany;
@@ -3267,8 +3273,6 @@ static irqreturn_t ucc_geth_irq_handler(int irq, void *info)
3267 struct ucc_geth_info *ug_info; 3273 struct ucc_geth_info *ug_info;
3268 register u32 ucce; 3274 register u32 ucce;
3269 register u32 uccm; 3275 register u32 uccm;
3270 register u32 tx_mask;
3271 u8 i;
3272 3276
3273 ugeth_vdbg("%s: IN", __func__); 3277 ugeth_vdbg("%s: IN", __func__);
3274 3278
@@ -3282,27 +3286,14 @@ static irqreturn_t ucc_geth_irq_handler(int irq, void *info)
3282 out_be32(uccf->p_ucce, ucce); 3286 out_be32(uccf->p_ucce, ucce);
3283 3287
3284 /* check for receive events that require processing */ 3288 /* check for receive events that require processing */
3285 if (ucce & UCCE_RX_EVENTS) { 3289 if (ucce & (UCCE_RX_EVENTS | UCCE_TX_EVENTS)) {
3286 if (napi_schedule_prep(&ugeth->napi)) { 3290 if (napi_schedule_prep(&ugeth->napi)) {
3287 uccm &= ~UCCE_RX_EVENTS; 3291 uccm &= ~(UCCE_RX_EVENTS | UCCE_TX_EVENTS);
3288 out_be32(uccf->p_uccm, uccm); 3292 out_be32(uccf->p_uccm, uccm);
3289 __napi_schedule(&ugeth->napi); 3293 __napi_schedule(&ugeth->napi);
3290 } 3294 }
3291 } 3295 }
3292 3296
3293 /* Tx event processing */
3294 if (ucce & UCCE_TX_EVENTS) {
3295 spin_lock(&ugeth->lock);
3296 tx_mask = UCC_GETH_UCCE_TXB0;
3297 for (i = 0; i < ug_info->numQueuesTx; i++) {
3298 if (ucce & tx_mask)
3299 ucc_geth_tx(dev, i);
3300 ucce &= ~tx_mask;
3301 tx_mask <<= 1;
3302 }
3303 spin_unlock(&ugeth->lock);
3304 }
3305
3306 /* Errors and other events */ 3297 /* Errors and other events */
3307 if (ucce & UCCE_OTHER) { 3298 if (ucce & UCCE_OTHER) {
3308 if (ucce & UCC_GETH_UCCE_BSY) 3299 if (ucce & UCC_GETH_UCCE_BSY)
@@ -3735,7 +3726,7 @@ static int ucc_geth_probe(struct of_device* ofdev, const struct of_device_id *ma
3735 dev->netdev_ops = &ucc_geth_netdev_ops; 3726 dev->netdev_ops = &ucc_geth_netdev_ops;
3736 dev->watchdog_timeo = TX_TIMEOUT; 3727 dev->watchdog_timeo = TX_TIMEOUT;
3737 INIT_WORK(&ugeth->timeout_work, ucc_geth_timeout_work); 3728 INIT_WORK(&ugeth->timeout_work, ucc_geth_timeout_work);
3738 netif_napi_add(dev, &ugeth->napi, ucc_geth_poll, UCC_GETH_DEV_WEIGHT); 3729 netif_napi_add(dev, &ugeth->napi, ucc_geth_poll, 64);
3739 dev->mtu = 1500; 3730 dev->mtu = 1500;
3740 3731
3741 ugeth->msg_enable = netif_msg_init(debug.msg_enable, UGETH_MSG_DEFAULT); 3732 ugeth->msg_enable = netif_msg_init(debug.msg_enable, UGETH_MSG_DEFAULT);
diff --git a/drivers/net/ucc_geth.h b/drivers/net/ucc_geth.h
index 2f8ee7c87efe..602764799df0 100644
--- a/drivers/net/ucc_geth.h
+++ b/drivers/net/ucc_geth.h
@@ -852,7 +852,6 @@ struct ucc_geth_hardware_statistics {
852/* Driver definitions */ 852/* Driver definitions */
853#define TX_BD_RING_LEN 0x10 853#define TX_BD_RING_LEN 0x10
854#define RX_BD_RING_LEN 0x10 854#define RX_BD_RING_LEN 0x10
855#define UCC_GETH_DEV_WEIGHT TX_BD_RING_LEN
856 855
857#define TX_RING_MOD_MASK(size) (size-1) 856#define TX_RING_MOD_MASK(size) (size-1)
858#define RX_RING_MOD_MASK(size) (size-1) 857#define RX_RING_MOD_MASK(size) (size-1)
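The ucc_geth change moves TX completion out of the hard interrupt handler into NAPI: the IRQ handler now masks the combined RX|TX event bits and schedules polling, and the poll routine reclaims TX buffers under the driver lock before completing; that is also why the TX path can call dev_kfree_skb() instead of dev_kfree_skb_irq(). A stripped-down sketch of the resulting poll function (the foo_* helpers are placeholders):

#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/spinlock.h>

struct foo_priv {
        struct napi_struct      napi;
        spinlock_t              lock;
};

static int foo_clean_rx(struct foo_priv *priv, int budget) { return 0; }
static void foo_clean_tx(struct foo_priv *priv) { }
static void foo_unmask_rx_tx_events(struct foo_priv *priv) { }

static int foo_poll(struct napi_struct *napi, int budget)
{
        struct foo_priv *priv = container_of(napi, struct foo_priv, napi);
        int howmany = foo_clean_rx(priv, budget);

        /* TX reclaim now runs here, in softirq context, not in the IRQ */
        spin_lock(&priv->lock);
        foo_clean_tx(priv);
        spin_unlock(&priv->lock);

        if (howmany < budget) {
                napi_complete(napi);
                foo_unmask_rx_tx_events(priv); /* re-enable RX and TX IRQs */
        }

        return howmany;
}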
diff --git a/drivers/net/usb/Kconfig b/drivers/net/usb/Kconfig
index 8ee21030e9ac..e00b5b1f6743 100644
--- a/drivers/net/usb/Kconfig
+++ b/drivers/net/usb/Kconfig
@@ -345,4 +345,12 @@ config USB_HSO
345 To compile this driver as a module, choose M here: the 345 To compile this driver as a module, choose M here: the
346 module will be called hso. 346 module will be called hso.
347 347
348config USB_NET_INT51X1
349 tristate "Intellon PLC based usb adapter"
350 depends on USB_USBNET
351 help
352 Choose this option if you're using a 14Mb USB-based PLC
353 (Powerline Communications) solution with an Intellon
354 INT51x1/INT5200 chip, like the "devolo dLan duo".
355
348endmenu 356endmenu
diff --git a/drivers/net/usb/Makefile b/drivers/net/usb/Makefile
index 88a87eeb376a..f4402a06e52c 100644
--- a/drivers/net/usb/Makefile
+++ b/drivers/net/usb/Makefile
@@ -19,4 +19,5 @@ obj-$(CONFIG_USB_NET_CDC_SUBSET) += cdc_subset.o
19obj-$(CONFIG_USB_NET_ZAURUS) += zaurus.o 19obj-$(CONFIG_USB_NET_ZAURUS) += zaurus.o
20obj-$(CONFIG_USB_NET_MCS7830) += mcs7830.o 20obj-$(CONFIG_USB_NET_MCS7830) += mcs7830.o
21obj-$(CONFIG_USB_USBNET) += usbnet.o 21obj-$(CONFIG_USB_USBNET) += usbnet.o
22obj-$(CONFIG_USB_NET_INT51X1) += int51x1.o
22 23
diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c
index 55e8ecc3a9e5..01fd528306ec 100644
--- a/drivers/net/usb/cdc_ether.c
+++ b/drivers/net/usb/cdc_ether.c
@@ -25,7 +25,6 @@
25#include <linux/init.h> 25#include <linux/init.h>
26#include <linux/netdevice.h> 26#include <linux/netdevice.h>
27#include <linux/etherdevice.h> 27#include <linux/etherdevice.h>
28#include <linux/ctype.h>
29#include <linux/ethtool.h> 28#include <linux/ethtool.h>
30#include <linux/workqueue.h> 29#include <linux/workqueue.h>
31#include <linux/mii.h> 30#include <linux/mii.h>
@@ -389,36 +388,6 @@ static void cdc_status(struct usbnet *dev, struct urb *urb)
389 } 388 }
390} 389}
391 390
392static u8 nibble(unsigned char c)
393{
394 if (likely(isdigit(c)))
395 return c - '0';
396 c = toupper(c);
397 if (likely(isxdigit(c)))
398 return 10 + c - 'A';
399 return 0;
400}
401
402static inline int
403get_ethernet_addr(struct usbnet *dev, struct usb_cdc_ether_desc *e)
404{
405 int tmp, i;
406 unsigned char buf [13];
407
408 tmp = usb_string(dev->udev, e->iMACAddress, buf, sizeof buf);
409 if (tmp != 12) {
410 dev_dbg(&dev->udev->dev,
411 "bad MAC string %d fetch, %d\n", e->iMACAddress, tmp);
412 if (tmp >= 0)
413 tmp = -EINVAL;
414 return tmp;
415 }
416 for (i = tmp = 0; i < 6; i++, tmp += 2)
417 dev->net->dev_addr [i] =
418 (nibble(buf [tmp]) << 4) + nibble(buf [tmp + 1]);
419 return 0;
420}
421
422static int cdc_bind(struct usbnet *dev, struct usb_interface *intf) 391static int cdc_bind(struct usbnet *dev, struct usb_interface *intf)
423{ 392{
424 int status; 393 int status;
@@ -428,7 +397,7 @@ static int cdc_bind(struct usbnet *dev, struct usb_interface *intf)
428 if (status < 0) 397 if (status < 0)
429 return status; 398 return status;
430 399
431 status = get_ethernet_addr(dev, info->ether); 400 status = usbnet_get_ethernet_addr(dev, info->ether->iMACAddress);
432 if (status < 0) { 401 if (status < 0) {
433 usb_set_intfdata(info->data, NULL); 402 usb_set_intfdata(info->data, NULL);
434 usb_driver_release_interface(driver_of(intf), info->data); 403 usb_driver_release_interface(driver_of(intf), info->data);
diff --git a/drivers/net/usb/int51x1.c b/drivers/net/usb/int51x1.c
new file mode 100644
index 000000000000..55cf7081de10
--- /dev/null
+++ b/drivers/net/usb/int51x1.c
@@ -0,0 +1,253 @@
1/*
2 * Copyright (c) 2009 Peter Holik
3 *
4 * Intellon usb PLC (Powerline Communications) usb net driver
5 *
6 * http://www.tandel.be/downloads/INT51X1_Datasheet.pdf
7 *
8 * Based on the work of Jan 'RedBully' Seiffert
9 */
10
11/*
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2 of the License, or.
15 * (at your option) any later version.
16 *
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, write to the Free Software
24 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
25 */
26
27#include <linux/module.h>
28#include <linux/ctype.h>
29#include <linux/netdevice.h>
30#include <linux/etherdevice.h>
31#include <linux/ethtool.h>
32#include <linux/mii.h>
33#include <linux/usb.h>
34#include <linux/usb/usbnet.h>
35
36#define INT51X1_VENDOR_ID 0x09e1
37#define INT51X1_PRODUCT_ID 0x5121
38
39#define INT51X1_HEADER_SIZE 2 /* 2 byte header */
40
41#define PACKET_TYPE_PROMISCUOUS (1 << 0)
42#define PACKET_TYPE_ALL_MULTICAST (1 << 1) /* no filter */
43#define PACKET_TYPE_DIRECTED (1 << 2)
44#define PACKET_TYPE_BROADCAST (1 << 3)
45#define PACKET_TYPE_MULTICAST (1 << 4) /* filtered */
46
47#define SET_ETHERNET_PACKET_FILTER 0x43
48
49static int int51x1_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
50{
51 int len;
52
53 if (!(pskb_may_pull(skb, INT51X1_HEADER_SIZE))) {
54 deverr(dev, "unexpected tiny rx frame");
55 return 0;
56 }
57
58 len = le16_to_cpu(*(__le16 *)&skb->data[skb->len - 2]);
59
60 skb_trim(skb, len);
61
62 return 1;
63}
64
65static struct sk_buff *int51x1_tx_fixup(struct usbnet *dev,
66 struct sk_buff *skb, gfp_t flags)
67{
68 int pack_len = skb->len;
69 int pack_with_header_len = pack_len + INT51X1_HEADER_SIZE;
70 int headroom = skb_headroom(skb);
71 int tailroom = skb_tailroom(skb);
72 int need_tail = 0;
73 __le16 *len;
74
75 /* if packet and our header is smaller than 64 pad to 64 (+ ZLP) */
76 if ((pack_with_header_len) < dev->maxpacket)
77 need_tail = dev->maxpacket - pack_with_header_len + 1;
78 /*
79 * usbnet would send a ZLP for us if packet length mod urb size == 0,
80 * but we need to know about it ourselves, because it would add to the
81 * length we send down to the device...
82 */
83 else if (!(pack_with_header_len % dev->maxpacket))
84 need_tail = 1;
85
86 if (!skb_cloned(skb) &&
87 (headroom + tailroom >= need_tail + INT51X1_HEADER_SIZE)) {
88 if (headroom < INT51X1_HEADER_SIZE || tailroom < need_tail) {
89 skb->data = memmove(skb->head + INT51X1_HEADER_SIZE,
90 skb->data, skb->len);
91 skb_set_tail_pointer(skb, skb->len);
92 }
93 } else {
94 struct sk_buff *skb2;
95
96 skb2 = skb_copy_expand(skb,
97 INT51X1_HEADER_SIZE,
98 need_tail,
99 flags);
100 dev_kfree_skb_any(skb);
101 if (!skb2)
102 return NULL;
103 skb = skb2;
104 }
105
106 pack_len += need_tail;
107 pack_len &= 0x07ff;
108
109 len = (__le16 *) __skb_push(skb, INT51X1_HEADER_SIZE);
110 *len = cpu_to_le16(pack_len);
111
112 if(need_tail)
113 memset(__skb_put(skb, need_tail), 0, need_tail);
114
115 return skb;
116}
117
118static void int51x1_async_cmd_callback(struct urb *urb)
119{
120 struct usb_ctrlrequest *req = (struct usb_ctrlrequest *)urb->context;
121 int status = urb->status;
122
123 if (status < 0)
124 dev_warn(&urb->dev->dev, "async callback failed with %d\n", status);
125
126 kfree(req);
127 usb_free_urb(urb);
128}
129
130static void int51x1_set_multicast(struct net_device *netdev)
131{
132 struct usb_ctrlrequest *req;
133 int status;
134 struct urb *urb;
135 struct usbnet *dev = netdev_priv(netdev);
136 u16 filter = PACKET_TYPE_DIRECTED | PACKET_TYPE_BROADCAST;
137
138 if (netdev->flags & IFF_PROMISC) {
139 /* do not expect to see traffic of other PLCs */
140 filter |= PACKET_TYPE_PROMISCUOUS;
141 devinfo(dev, "promiscuous mode enabled");
142 } else if (netdev->mc_count ||
143 (netdev->flags & IFF_ALLMULTI)) {
144 filter |= PACKET_TYPE_ALL_MULTICAST;
145 devdbg(dev, "receive all multicast enabled");
146 } else {
147 /* ~PROMISCUOUS, ~MULTICAST */
148 devdbg(dev, "receive own packets only");
149 }
150
151 urb = usb_alloc_urb(0, GFP_ATOMIC);
152 if (!urb) {
153 devwarn(dev, "Error allocating URB");
154 return;
155 }
156
157 req = kmalloc(sizeof(*req), GFP_ATOMIC);
158 if (!req) {
159 devwarn(dev, "Error allocating control msg");
160 goto out;
161 }
162
163 req->bRequestType = USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE;
164 req->bRequest = SET_ETHERNET_PACKET_FILTER;
165 req->wValue = cpu_to_le16(filter);
166 req->wIndex = 0;
167 req->wLength = 0;
168
169 usb_fill_control_urb(urb, dev->udev, usb_sndctrlpipe(dev->udev, 0),
170 (void *)req, NULL, 0,
171 int51x1_async_cmd_callback,
172 (void *)req);
173
174 status = usb_submit_urb(urb, GFP_ATOMIC);
175 if (status < 0) {
176 devwarn(dev, "Error submitting control msg, sts=%d", status);
177 goto out1;
178 }
179 return;
180out1:
181 kfree(req);
182out:
183 usb_free_urb(urb);
184}
185
186static const struct net_device_ops int51x1_netdev_ops = {
187 .ndo_open = usbnet_open,
188 .ndo_stop = usbnet_stop,
189 .ndo_start_xmit = usbnet_start_xmit,
190 .ndo_tx_timeout = usbnet_tx_timeout,
191 .ndo_change_mtu = usbnet_change_mtu,
192 .ndo_set_mac_address = eth_mac_addr,
193 .ndo_validate_addr = eth_validate_addr,
194 .ndo_set_multicast_list = int51x1_set_multicast,
195};
196
197static int int51x1_bind(struct usbnet *dev, struct usb_interface *intf)
198{
199 int status = usbnet_get_ethernet_addr(dev, 3);
200
201 if (status)
202 return status;
203
204 dev->net->hard_header_len += INT51X1_HEADER_SIZE;
205 dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len;
206 dev->net->netdev_ops = &int51x1_netdev_ops;
207
208 return usbnet_get_endpoints(dev, intf);
209}
210
211static const struct driver_info int51x1_info = {
212 .description = "Intellon usb powerline adapter",
213 .bind = int51x1_bind,
214 .rx_fixup = int51x1_rx_fixup,
215 .tx_fixup = int51x1_tx_fixup,
216 .in = 1,
217 .out = 2,
218 .flags = FLAG_ETHER,
219};
220
221static const struct usb_device_id products[] = {
222 {
223 USB_DEVICE(INT51X1_VENDOR_ID, INT51X1_PRODUCT_ID),
224 .driver_info = (unsigned long) &int51x1_info,
225 },
226 {},
227};
228MODULE_DEVICE_TABLE(usb, products);
229
230static struct usb_driver int51x1_driver = {
231 .name = "int51x1",
232 .id_table = products,
233 .probe = usbnet_probe,
234 .disconnect = usbnet_disconnect,
235 .suspend = usbnet_suspend,
236 .resume = usbnet_resume,
237};
238
239static int __init int51x1_init(void)
240{
241 return usb_register(&int51x1_driver);
242}
243module_init(int51x1_init);
244
245static void __exit int51x1_exit(void)
246{
247 usb_deregister(&int51x1_driver);
248}
249module_exit(int51x1_exit);
250
251MODULE_AUTHOR("Peter Holik");
252MODULE_DESCRIPTION("Intellon usb powerline adapter");
253MODULE_LICENSE("GPL");
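The new int51x1 driver is a small usbnet minidriver; the one non-obvious piece is the TX fixup, which prepends a 2-byte little-endian length and pads the frame so the URB never ends exactly on a wMaxPacketSize boundary (that would otherwise require sending a zero-length packet). The padding rule it implements, pulled out as a plain helper for illustration (not part of the patch):

/* how many zero bytes of tail padding a frame needs before transmit */
static int foo_tail_padding(int pack_len, int header_len, int maxpacket)
{
        int total = pack_len + header_len;

        if (total < maxpacket)
                return maxpacket - total + 1;   /* pad short frames past
                                                 * one full packet */
        if (!(total % maxpacket))
                return 1;                       /* break up an exact
                                                 * multiple of maxpacket */
        return 0;
}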
diff --git a/drivers/net/usb/kaweth.c b/drivers/net/usb/kaweth.c
index 3d0d0b0b37c5..e01314789718 100644
--- a/drivers/net/usb/kaweth.c
+++ b/drivers/net/usb/kaweth.c
@@ -31,7 +31,6 @@
31 ****************************************************************/ 31 ****************************************************************/
32 32
33/* TODO: 33/* TODO:
34 * Fix in_interrupt() problem
35 * Develop test procedures for USB net interfaces 34 * Develop test procedures for USB net interfaces
36 * Run test procedures 35 * Run test procedures
37 * Fix bugs from previous two steps 36 * Fix bugs from previous two steps
@@ -606,14 +605,30 @@ static void kaweth_usb_receive(struct urb *urb)
606 605
607 struct sk_buff *skb; 606 struct sk_buff *skb;
608 607
609 if(unlikely(status == -ECONNRESET || status == -ESHUTDOWN)) 608 if (unlikely(status == -EPIPE)) {
610 /* we are killed - set a flag and wake the disconnect handler */ 609 kaweth->stats.rx_errors++;
611 {
612 kaweth->end = 1; 610 kaweth->end = 1;
613 wake_up(&kaweth->term_wait); 611 wake_up(&kaweth->term_wait);
612 dbg("Status was -EPIPE.");
614 return; 613 return;
615 } 614 }
616 615 if (unlikely(status == -ECONNRESET || status == -ESHUTDOWN)) {
616 /* we are killed - set a flag and wake the disconnect handler */
617 kaweth->end = 1;
618 wake_up(&kaweth->term_wait);
619 dbg("Status was -ECONNRESET or -ESHUTDOWN.");
620 return;
621 }
622 if (unlikely(status == -EPROTO || status == -ETIME ||
623 status == -EILSEQ)) {
624 kaweth->stats.rx_errors++;
625 dbg("Status was -EPROTO, -ETIME, or -EILSEQ.");
626 return;
627 }
628 if (unlikely(status == -EOVERFLOW)) {
629 kaweth->stats.rx_errors++;
630 dbg("Status was -EOVERFLOW.");
631 }
617 spin_lock(&kaweth->device_lock); 632 spin_lock(&kaweth->device_lock);
618 if (IS_BLOCKED(kaweth->status)) { 633 if (IS_BLOCKED(kaweth->status)) {
619 spin_unlock(&kaweth->device_lock); 634 spin_unlock(&kaweth->device_lock);
@@ -883,13 +898,16 @@ static void kaweth_set_rx_mode(struct net_device *net)
883 ****************************************************************/ 898 ****************************************************************/
884static void kaweth_async_set_rx_mode(struct kaweth_device *kaweth) 899static void kaweth_async_set_rx_mode(struct kaweth_device *kaweth)
885{ 900{
901 int result;
886 __u16 packet_filter_bitmap = kaweth->packet_filter_bitmap; 902 __u16 packet_filter_bitmap = kaweth->packet_filter_bitmap;
903
887 kaweth->packet_filter_bitmap = 0; 904 kaweth->packet_filter_bitmap = 0;
888 if (packet_filter_bitmap == 0) 905 if (packet_filter_bitmap == 0)
889 return; 906 return;
890 907
891 { 908 if (in_interrupt())
892 int result; 909 return;
910
893 result = kaweth_control(kaweth, 911 result = kaweth_control(kaweth,
894 usb_sndctrlpipe(kaweth->dev, 0), 912 usb_sndctrlpipe(kaweth->dev, 0),
895 KAWETH_COMMAND_SET_PACKET_FILTER, 913 KAWETH_COMMAND_SET_PACKET_FILTER,
@@ -906,7 +924,6 @@ static void kaweth_async_set_rx_mode(struct kaweth_device *kaweth)
906 else { 924 else {
907 dbg("Set Rx mode to %d", packet_filter_bitmap); 925 dbg("Set Rx mode to %d", packet_filter_bitmap);
908 } 926 }
909 }
910} 927}
911 928
912/**************************************************************** 929/****************************************************************
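The kaweth receive-completion rework replaces one catch-all check with per-status handling: -EPIPE and the unlink/shutdown codes stop processing (and in kaweth's case wake the disconnect handler), low-level bus errors (-EPROTO, -ETIME, -EILSEQ) bump rx_errors and abandon the URB, and -EOVERFLOW bumps rx_errors but lets the data through. A generic version of that triage for a hypothetical RX completion handler (the counting is left as comments):

#include <linux/errno.h>
#include <linux/usb.h>

static void foo_rx_complete(struct urb *urb)
{
        int status = urb->status;

        switch (status) {
        case 0:
                break;                  /* normal completion */
        case -EPIPE:
        case -ECONNRESET:
        case -ESHUTDOWN:
                /* stalled, unlinked or device going away: stop here */
                return;
        case -EPROTO:
        case -ETIME:
        case -EILSEQ:
                /* low-level USB errors: a driver would bump rx_errors
                 * here and give up on this URB */
                return;
        case -EOVERFLOW:
                /* babble: worth counting, but the data may be usable */
                break;
        default:
                return;
        }

        /* ... hand the buffer to the stack and resubmit the URB ... */
}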
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index f3a2fce6166c..c94de6243140 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -37,6 +37,7 @@
37#include <linux/init.h> 37#include <linux/init.h>
38#include <linux/netdevice.h> 38#include <linux/netdevice.h>
39#include <linux/etherdevice.h> 39#include <linux/etherdevice.h>
40#include <linux/ctype.h>
40#include <linux/ethtool.h> 41#include <linux/ethtool.h>
41#include <linux/workqueue.h> 42#include <linux/workqueue.h>
42#include <linux/mii.h> 43#include <linux/mii.h>
@@ -156,6 +157,36 @@ int usbnet_get_endpoints(struct usbnet *dev, struct usb_interface *intf)
156} 157}
157EXPORT_SYMBOL_GPL(usbnet_get_endpoints); 158EXPORT_SYMBOL_GPL(usbnet_get_endpoints);
158 159
160static u8 nibble(unsigned char c)
161{
162 if (likely(isdigit(c)))
163 return c - '0';
164 c = toupper(c);
165 if (likely(isxdigit(c)))
166 return 10 + c - 'A';
167 return 0;
168}
169
170int usbnet_get_ethernet_addr(struct usbnet *dev, int iMACAddress)
171{
172 int tmp, i;
173 unsigned char buf [13];
174
175 tmp = usb_string(dev->udev, iMACAddress, buf, sizeof buf);
176 if (tmp != 12) {
177 dev_dbg(&dev->udev->dev,
178 "bad MAC string %d fetch, %d\n", iMACAddress, tmp);
179 if (tmp >= 0)
180 tmp = -EINVAL;
181 return tmp;
182 }
183 for (i = tmp = 0; i < 6; i++, tmp += 2)
184 dev->net->dev_addr [i] =
185 (nibble(buf [tmp]) << 4) + nibble(buf [tmp + 1]);
186 return 0;
187}
188EXPORT_SYMBOL_GPL(usbnet_get_ethernet_addr);
189
159static void intr_complete (struct urb *urb); 190static void intr_complete (struct urb *urb);
160 191
161static int init_status (struct usbnet *dev, struct usb_interface *intf) 192static int init_status (struct usbnet *dev, struct usb_interface *intf)
@@ -1185,12 +1216,6 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod)
1185#endif 1216#endif
1186 1217
1187 net->netdev_ops = &usbnet_netdev_ops; 1218 net->netdev_ops = &usbnet_netdev_ops;
1188#ifdef CONFIG_COMPAT_NET_DEV_OPS
1189 net->hard_start_xmit = usbnet_start_xmit;
1190 net->open = usbnet_open;
1191 net->stop = usbnet_stop;
1192 net->tx_timeout = usbnet_tx_timeout;
1193#endif
1194 net->watchdog_timeo = TX_TIMEOUT_JIFFIES; 1219 net->watchdog_timeo = TX_TIMEOUT_JIFFIES;
1195 net->ethtool_ops = &usbnet_ethtool_ops; 1220 net->ethtool_ops = &usbnet_ethtool_ops;
1196 1221
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 9c82a39497e5..071855871524 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -621,12 +621,9 @@ static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd,
621 virtio_net_ctrl_ack status = ~0; 621 virtio_net_ctrl_ack status = ~0;
622 unsigned int tmp; 622 unsigned int tmp;
623 623
624 if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ)) { 624 /* Caller should know better */
625 BUG(); /* Caller should know better */ 625 BUG_ON(!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ) ||
626 return false; 626 (out + in > VIRTNET_SEND_COMMAND_SG_MAX));
627 }
628
629 BUG_ON(out + in > VIRTNET_SEND_COMMAND_SG_MAX);
630 627
631 out++; /* Add header */ 628 out++; /* Add header */
632 in++; /* Add return status */ 629 in++; /* Add return status */
@@ -640,8 +637,7 @@ static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd,
640 memcpy(&sg[1], data, sizeof(struct scatterlist) * (out + in - 2)); 637 memcpy(&sg[1], data, sizeof(struct scatterlist) * (out + in - 2));
641 sg_set_buf(&sg[out + in - 1], &status, sizeof(status)); 638 sg_set_buf(&sg[out + in - 1], &status, sizeof(status));
642 639
643 if (vi->cvq->vq_ops->add_buf(vi->cvq, sg, out, in, vi) != 0) 640 BUG_ON(vi->cvq->vq_ops->add_buf(vi->cvq, sg, out, in, vi));
644 BUG();
645 641
646 vi->cvq->vq_ops->kick(vi->cvq); 642 vi->cvq->vq_ops->kick(vi->cvq);
647 643
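The virtio_net hunk above, the winbond-840 one earlier, and the ath5k, ath9k, ipw2200 and libertas hunks below all make the same mechanical cleanup: an open-coded "if (condition) BUG();", sometimes followed by unreachable error handling, collapses into a single BUG_ON(). A trivial before/after sketch:

#include <linux/kernel.h>

static void foo_sanity_check(int ok, int nsg, int max_sg)
{
        /* before:
         *      if (!ok) {
         *              BUG();
         *              return;         // never reached
         *      }
         *      if (nsg > max_sg)
         *              BUG();
         */
        BUG_ON(!ok || nsg > max_sg);
}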
diff --git a/drivers/net/wan/pc300_drv.c b/drivers/net/wan/pc300_drv.c
index c23fde0c0344..79dabc557bd3 100644
--- a/drivers/net/wan/pc300_drv.c
+++ b/drivers/net/wan/pc300_drv.c
@@ -225,6 +225,7 @@ static char rcsid[] =
225#include <linux/skbuff.h> 225#include <linux/skbuff.h>
226#include <linux/if_arp.h> 226#include <linux/if_arp.h>
227#include <linux/netdevice.h> 227#include <linux/netdevice.h>
228#include <linux/etherdevice.h>
228#include <linux/spinlock.h> 229#include <linux/spinlock.h>
229#include <linux/if.h> 230#include <linux/if.h>
230#include <net/arp.h> 231#include <net/arp.h>
@@ -3246,6 +3247,16 @@ static inline void show_version(void)
3246 rcsvers, rcsdate, __DATE__, __TIME__); 3247 rcsvers, rcsdate, __DATE__, __TIME__);
3247} /* show_version */ 3248} /* show_version */
3248 3249
3250static const struct net_device_ops cpc_netdev_ops = {
3251 .ndo_open = cpc_open,
3252 .ndo_stop = cpc_close,
3253 .ndo_tx_timeout = cpc_tx_timeout,
3254 .ndo_set_mac_address = NULL,
3255 .ndo_change_mtu = cpc_change_mtu,
3256 .ndo_do_ioctl = cpc_ioctl,
3257 .ndo_validate_addr = eth_validate_addr,
3258};
3259
3249static void cpc_init_card(pc300_t * card) 3260static void cpc_init_card(pc300_t * card)
3250{ 3261{
3251 int i, devcount = 0; 3262 int i, devcount = 0;
@@ -3357,18 +3368,11 @@ static void cpc_init_card(pc300_t * card)
3357 dev->mem_start = card->hw.ramphys; 3368 dev->mem_start = card->hw.ramphys;
3358 dev->mem_end = card->hw.ramphys + card->hw.ramsize - 1; 3369 dev->mem_end = card->hw.ramphys + card->hw.ramsize - 1;
3359 dev->irq = card->hw.irq; 3370 dev->irq = card->hw.irq;
3360 dev->init = NULL;
3361 dev->tx_queue_len = PC300_TX_QUEUE_LEN; 3371 dev->tx_queue_len = PC300_TX_QUEUE_LEN;
3362 dev->mtu = PC300_DEF_MTU; 3372 dev->mtu = PC300_DEF_MTU;
3363 3373
3364 dev->open = cpc_open; 3374 dev->netdev_ops = &cpc_netdev_ops;
3365 dev->stop = cpc_close;
3366 dev->tx_timeout = cpc_tx_timeout;
3367 dev->watchdog_timeo = PC300_TX_TIMEOUT; 3375 dev->watchdog_timeo = PC300_TX_TIMEOUT;
3368 dev->set_multicast_list = NULL;
3369 dev->set_mac_address = NULL;
3370 dev->change_mtu = cpc_change_mtu;
3371 dev->do_ioctl = cpc_ioctl;
3372 3376
3373 if (register_hdlc_device(dev) == 0) { 3377 if (register_hdlc_device(dev) == 0) {
3374 printk("%s: Cyclades-PC300/", dev->name); 3378 printk("%s: Cyclades-PC300/", dev->name);
diff --git a/drivers/net/wireless/at76c50x-usb.c b/drivers/net/wireless/at76c50x-usb.c
index 744f4f4dd3d1..69248ded5102 100644
--- a/drivers/net/wireless/at76c50x-usb.c
+++ b/drivers/net/wireless/at76c50x-usb.c
@@ -2311,8 +2311,7 @@ static void at76_delete_device(struct at76_priv *priv)
2311 2311
2312 del_timer_sync(&ledtrig_tx_timer); 2312 del_timer_sync(&ledtrig_tx_timer);
2313 2313
2314 if (priv->rx_skb) 2314 kfree_skb(priv->rx_skb);
2315 kfree_skb(priv->rx_skb);
2316 2315
2317 usb_put_dev(priv->udev); 2316 usb_put_dev(priv->udev);
2318 2317
diff --git a/drivers/net/wireless/ath5k/reset.c b/drivers/net/wireless/ath5k/reset.c
index 7a17d31b2fd9..cb5e15f97095 100644
--- a/drivers/net/wireless/ath5k/reset.c
+++ b/drivers/net/wireless/ath5k/reset.c
@@ -54,9 +54,8 @@ static inline int ath5k_hw_write_ofdm_timings(struct ath5k_hw *ah,
54 u32 coef_scaled, coef_exp, coef_man, 54 u32 coef_scaled, coef_exp, coef_man,
55 ds_coef_exp, ds_coef_man, clock; 55 ds_coef_exp, ds_coef_man, clock;
56 56
57 if (!(ah->ah_version == AR5K_AR5212) || 57 BUG_ON(!(ah->ah_version == AR5K_AR5212) ||
58 !(channel->hw_value & CHANNEL_OFDM)) 58 !(channel->hw_value & CHANNEL_OFDM));
59 BUG();
60 59
61 /* Get coefficient 60 /* Get coefficient
62 * ALGO: coef = (5 * clock * carrier_freq) / 2) 61 * ALGO: coef = (5 * clock * carrier_freq) / 2)
diff --git a/drivers/net/wireless/ath9k/ath9k.h b/drivers/net/wireless/ath9k/ath9k.h
index 2689a08a2844..7b1b40aaf09d 100644
--- a/drivers/net/wireless/ath9k/ath9k.h
+++ b/drivers/net/wireless/ath9k/ath9k.h
@@ -53,11 +53,7 @@ struct ath_node;
53 53
54#define A_MAX(a, b) ((a) > (b) ? (a) : (b)) 54#define A_MAX(a, b) ((a) > (b) ? (a) : (b))
55 55
56#define ASSERT(exp) do { \ 56#define ASSERT(exp) BUG_ON(!(exp))
57 if (unlikely(!(exp))) { \
58 BUG(); \
59 } \
60 } while (0)
61 57
62#define TSF_TO_TU(_h,_l) \ 58#define TSF_TO_TU(_h,_l) \
63 ((((u32)(_h)) << 22) | (((u32)(_l)) >> 10)) 59 ((((u32)(_h)) << 22) | (((u32)(_l)) >> 10))
diff --git a/drivers/net/wireless/ipw2x00/ipw2200.c b/drivers/net/wireless/ipw2x00/ipw2200.c
index bd4dbcfe1bbe..9a123fbcc359 100644
--- a/drivers/net/wireless/ipw2x00/ipw2200.c
+++ b/drivers/net/wireless/ipw2x00/ipw2200.c
@@ -3176,11 +3176,8 @@ static int ipw_load_firmware(struct ipw_priv *priv, u8 * data, size_t len)
3176 /* Start the Dma */ 3176 /* Start the Dma */
3177 rc = ipw_fw_dma_enable(priv); 3177 rc = ipw_fw_dma_enable(priv);
3178 3178
3179 if (priv->sram_desc.last_cb_index > 0) { 3179 /* the DMA is already ready this would be a bug. */
3180 /* the DMA is already ready this would be a bug. */ 3180 BUG_ON(priv->sram_desc.last_cb_index > 0);
3181 BUG();
3182 goto out;
3183 }
3184 3181
3185 do { 3182 do {
3186 chunk = (struct fw_chunk *)(data + offset); 3183 chunk = (struct fw_chunk *)(data + offset);
diff --git a/drivers/net/wireless/ipw2x00/libipw_module.c b/drivers/net/wireless/ipw2x00/libipw_module.c
index 92a26922e792..8ce6e961c5da 100644
--- a/drivers/net/wireless/ipw2x00/libipw_module.c
+++ b/drivers/net/wireless/ipw2x00/libipw_module.c
@@ -154,10 +154,6 @@ struct net_device *alloc_ieee80211(int sizeof_priv)
154 goto failed; 154 goto failed;
155 } 155 }
156 ieee = netdev_priv(dev); 156 ieee = netdev_priv(dev);
157#ifdef CONFIG_COMPAT_NET_DEV_OPS
158 dev->hard_start_xmit = ieee80211_xmit;
159 dev->change_mtu = ieee80211_change_mtu;
160#endif
161 157
162 ieee->dev = dev; 158 ieee->dev = dev;
163 159
diff --git a/drivers/net/wireless/libertas/if_usb.c b/drivers/net/wireless/libertas/if_usb.c
index ea3dc038be76..d649caebf08a 100644
--- a/drivers/net/wireless/libertas/if_usb.c
+++ b/drivers/net/wireless/libertas/if_usb.c
@@ -686,8 +686,7 @@ static inline void process_cmdrequest(int recvlength, uint8_t *recvbuff,
686 return; 686 return;
687 } 687 }
688 688
689 if (!in_interrupt()) 689 BUG_ON(!in_interrupt());
690 BUG();
691 690
692 spin_lock(&priv->driver_lock); 691 spin_lock(&priv->driver_lock);
693 692
diff --git a/drivers/net/wireless/libertas_tf/if_usb.c b/drivers/net/wireless/libertas_tf/if_usb.c
index 59634c33b1f9..392337b37b1d 100644
--- a/drivers/net/wireless/libertas_tf/if_usb.c
+++ b/drivers/net/wireless/libertas_tf/if_usb.c
@@ -461,8 +461,7 @@ static inline void process_cmdrequest(int recvlength, uint8_t *recvbuff,
461 return; 461 return;
462 } 462 }
463 463
464 if (!in_interrupt()) 464 BUG_ON(!in_interrupt());
465 BUG();
466 465
467 spin_lock(&priv->driver_lock); 466 spin_lock(&priv->driver_lock);
468 memcpy(priv->cmd_resp_buff, recvbuff + MESSAGE_HEADER_LEN, 467 memcpy(priv->cmd_resp_buff, recvbuff + MESSAGE_HEADER_LEN,