Diffstat (limited to 'drivers/net')
-rw-r--r--  drivers/net/3c59x.c | 73
-rw-r--r--  drivers/net/Kconfig | 16
-rw-r--r--  drivers/net/appletalk/cops.c | 16
-rw-r--r--  drivers/net/atlx/atl1.c | 157
-rw-r--r--  drivers/net/atlx/atl1.h | 2
-rw-r--r--  drivers/net/atlx/atlx.c | 2
-rw-r--r--  drivers/net/atlx/atlx.h | 7
-rw-r--r--  drivers/net/bonding/bond_main.c | 24
-rw-r--r--  drivers/net/bonding/bond_sysfs.c | 28
-rw-r--r--  drivers/net/cxgb3/adapter.h | 1
-rw-r--r--  drivers/net/cxgb3/common.h | 1
-rw-r--r--  drivers/net/cxgb3/cxgb3_main.c | 44
-rw-r--r--  drivers/net/cxgb3/regs.h | 8
-rw-r--r--  drivers/net/cxgb3/sge.c | 29
-rw-r--r--  drivers/net/cxgb3/t3_hw.c | 28
-rw-r--r--  drivers/net/dm9000.c | 37
-rw-r--r--  drivers/net/e1000e/defines.h | 10
-rw-r--r--  drivers/net/e1000e/e1000.h | 7
-rw-r--r--  drivers/net/e1000e/ethtool.c | 45
-rw-r--r--  drivers/net/e1000e/hw.h | 22
-rw-r--r--  drivers/net/e1000e/ich8lan.c | 83
-rw-r--r--  drivers/net/e1000e/netdev.c | 330
-rw-r--r--  drivers/net/e1000e/phy.c | 278
-rw-r--r--  drivers/net/eexpress.c | 11
-rw-r--r--  drivers/net/ehea/ehea.h | 27
-rw-r--r--  drivers/net/ehea/ehea_main.c | 25
-rw-r--r--  drivers/net/ehea/ehea_qmr.c | 286
-rw-r--r--  drivers/net/fs_enet/mii-fec.c | 3
-rw-r--r--  drivers/net/gianfar.c | 7
-rw-r--r--  drivers/net/gianfar.h | 3
-rw-r--r--  drivers/net/gianfar_sysfs.c | 10
-rw-r--r--  drivers/net/irda/nsc-ircc.c | 6
-rw-r--r--  drivers/net/irda/smsc-ircc2.c | 5
-rw-r--r--  drivers/net/macvlan.c | 2
-rw-r--r--  drivers/net/mlx4/mr.c | 2
-rw-r--r--  drivers/net/mv643xx_eth.c | 160
-rw-r--r--  drivers/net/myri10ge/myri10ge.c | 730
-rw-r--r--  drivers/net/myri10ge/myri10ge_mcp.h | 56
-rw-r--r--  drivers/net/myri10ge/myri10ge_mcp_gen_header.h | 39
-rw-r--r--  drivers/net/niu.c | 64
-rw-r--r--  drivers/net/niu.h | 9
-rw-r--r--  drivers/net/pcnet32.c | 61
-rw-r--r--  drivers/net/phy/phy.c | 2
-rw-r--r--  drivers/net/ppp_generic.c | 1
-rw-r--r--  drivers/net/pppol2tp.c | 13
-rw-r--r--  drivers/net/ps3_gelic_wireless.c | 2
-rw-r--r--  drivers/net/sfc/Makefile | 4
-rw-r--r--  drivers/net/sfc/boards.h | 2
-rw-r--r--  drivers/net/sfc/efx.c | 4
-rw-r--r--  drivers/net/sfc/enum.h | 49
-rw-r--r--  drivers/net/sfc/ethtool.c | 259
-rw-r--r--  drivers/net/sfc/falcon.c | 8
-rw-r--r--  drivers/net/sfc/falcon_hwdefs.h | 16
-rw-r--r--  drivers/net/sfc/falcon_xmac.c | 82
-rw-r--r--  drivers/net/sfc/mdio_10g.c | 78
-rw-r--r--  drivers/net/sfc/mdio_10g.h | 24
-rw-r--r--  drivers/net/sfc/net_driver.h | 28
-rw-r--r--  drivers/net/sfc/rx.c | 11
-rw-r--r--  drivers/net/sfc/selftest.c | 717
-rw-r--r--  drivers/net/sfc/selftest.h | 50
-rw-r--r--  drivers/net/sfc/sfe4001.c | 14
-rw-r--r--  drivers/net/sfc/tenxpress.c | 91
-rw-r--r--  drivers/net/sfc/tx.c | 664
-rw-r--r--  drivers/net/sfc/xfp_phy.c | 36
-rw-r--r--  drivers/net/sky2.h | 4
-rw-r--r--  drivers/net/tulip/uli526x.c | 38
-rw-r--r--  drivers/net/ucc_geth.c | 278
-rw-r--r--  drivers/net/ucc_geth.h | 48
-rw-r--r--  drivers/net/ucc_geth_ethtool.c | 6
-rw-r--r--  drivers/net/ucc_geth_mii.c | 4
-rw-r--r--  drivers/net/usb/asix.c | 4
-rw-r--r--  drivers/net/wan/Kconfig | 4
-rw-r--r--  drivers/net/wan/cosa.c | 14
-rw-r--r--  drivers/net/wan/hdlc_ppp.c | 2
-rw-r--r--  drivers/net/wan/hostess_sv11.c | 12
-rw-r--r--  drivers/net/wan/lapbether.c | 1
-rw-r--r--  drivers/net/wan/lmc/lmc_main.c | 1
-rw-r--r--  drivers/net/wan/sealevel.c | 1
-rw-r--r--  drivers/net/wireless/iwlwifi/Kconfig | 3
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-3945.c | 2
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-4965-rs.c | 2
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-4965.c | 2
-rw-r--r--  drivers/net/wireless/prism54/islpci_dev.c | 11
-rw-r--r--  drivers/net/wireless/rt2x00/rt2x00dev.c | 11
-rw-r--r--  drivers/net/wireless/rt2x00/rt2x00pci.c | 5
-rw-r--r--  drivers/net/wireless/rt2x00/rt61pci.c | 31
-rw-r--r--  drivers/net/wireless/strip.c | 2
-rw-r--r--  drivers/net/wireless/wavelan.c | 4
-rw-r--r--  drivers/net/wireless/wavelan_cs.c | 6
-rw-r--r--  drivers/net/wireless/zd1211rw/zd_usb.c | 6
90 files changed, 4375 insertions(+), 1036 deletions(-)
diff --git a/drivers/net/3c59x.c b/drivers/net/3c59x.c
index 6f8e7d4cf74d..2edda8cc7f99 100644
--- a/drivers/net/3c59x.c
+++ b/drivers/net/3c59x.c
@@ -319,7 +319,7 @@ static struct vortex_chip_info {
 	{"3c920B-EMB-WNM (ATI Radeon 9100 IGP)",
 	 PCI_USES_MASTER, IS_TORNADO|HAS_MII|HAS_HWCKSM, 128, },
 	{"3c980 Cyclone",
-	 PCI_USES_MASTER, IS_CYCLONE|HAS_HWCKSM, 128, },
+	 PCI_USES_MASTER, IS_CYCLONE|HAS_HWCKSM|EXTRA_PREAMBLE, 128, },

 	{"3c980C Python-T",
 	 PCI_USES_MASTER, IS_CYCLONE|HAS_NWAY|HAS_HWCKSM, 128, },
@@ -600,7 +600,6 @@ struct vortex_private {
 	struct sk_buff* tx_skbuff[TX_RING_SIZE];
 	unsigned int cur_rx, cur_tx;		/* The next free ring entry */
 	unsigned int dirty_rx, dirty_tx;	/* The ring entries to be free()ed. */
-	struct net_device_stats stats;		/* Generic stats */
 	struct vortex_extra_stats xstats;	/* NIC-specific extra stats */
 	struct sk_buff *tx_skb;			/* Packet being eaten by bus master ctrl. */
 	dma_addr_t tx_skb_dma;			/* Allocated DMA address for bus master ctrl DMA. */
@@ -1875,7 +1874,7 @@ static void vortex_tx_timeout(struct net_device *dev)

 	issue_and_wait(dev, TxReset);

-	vp->stats.tx_errors++;
+	dev->stats.tx_errors++;
 	if (vp->full_bus_master_tx) {
 		printk(KERN_DEBUG "%s: Resetting the Tx ring pointer.\n", dev->name);
 		if (vp->cur_tx - vp->dirty_tx > 0 && ioread32(ioaddr + DownListPtr) == 0)
@@ -1887,7 +1886,7 @@ static void vortex_tx_timeout(struct net_device *dev)
 			iowrite8(PKT_BUF_SZ>>8, ioaddr + TxFreeThreshold);
 		iowrite16(DownUnstall, ioaddr + EL3_CMD);
 	} else {
-		vp->stats.tx_dropped++;
+		dev->stats.tx_dropped++;
 		netif_wake_queue(dev);
 	}

@@ -1928,8 +1927,8 @@ vortex_error(struct net_device *dev, int status)
 			}
 			dump_tx_ring(dev);
 		}
-		if (tx_status & 0x14)  vp->stats.tx_fifo_errors++;
-		if (tx_status & 0x38)  vp->stats.tx_aborted_errors++;
+		if (tx_status & 0x14)  dev->stats.tx_fifo_errors++;
+		if (tx_status & 0x38)  dev->stats.tx_aborted_errors++;
 		if (tx_status & 0x08)  vp->xstats.tx_max_collisions++;
 		iowrite8(0, ioaddr + TxStatus);
 		if (tx_status & 0x30) {			/* txJabber or txUnderrun */
@@ -2051,8 +2050,8 @@ vortex_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		if (vortex_debug > 2)
 			printk(KERN_DEBUG "%s: Tx error, status %2.2x.\n",
 			       dev->name, tx_status);
-		if (tx_status & 0x04) vp->stats.tx_fifo_errors++;
-		if (tx_status & 0x38) vp->stats.tx_aborted_errors++;
+		if (tx_status & 0x04) dev->stats.tx_fifo_errors++;
+		if (tx_status & 0x38) dev->stats.tx_aborted_errors++;
 		if (tx_status & 0x30) {
 			issue_and_wait(dev, TxReset);
 		}
@@ -2350,7 +2349,7 @@ boomerang_interrupt(int irq, void *dev_id)
 				} else {
 					printk(KERN_DEBUG "boomerang_interrupt: no skb!\n");
 				}
-				/* vp->stats.tx_packets++;  Counted below. */
+				/* dev->stats.tx_packets++;  Counted below. */
 				dirty_tx++;
 			}
 			vp->dirty_tx = dirty_tx;
@@ -2409,12 +2408,12 @@ static int vortex_rx(struct net_device *dev)
 			unsigned char rx_error = ioread8(ioaddr + RxErrors);
 			if (vortex_debug > 2)
 				printk(KERN_DEBUG " Rx error: status %2.2x.\n", rx_error);
-			vp->stats.rx_errors++;
-			if (rx_error & 0x01)  vp->stats.rx_over_errors++;
-			if (rx_error & 0x02)  vp->stats.rx_length_errors++;
-			if (rx_error & 0x04)  vp->stats.rx_frame_errors++;
-			if (rx_error & 0x08)  vp->stats.rx_crc_errors++;
-			if (rx_error & 0x10)  vp->stats.rx_length_errors++;
+			dev->stats.rx_errors++;
+			if (rx_error & 0x01)  dev->stats.rx_over_errors++;
+			if (rx_error & 0x02)  dev->stats.rx_length_errors++;
+			if (rx_error & 0x04)  dev->stats.rx_frame_errors++;
+			if (rx_error & 0x08)  dev->stats.rx_crc_errors++;
+			if (rx_error & 0x10)  dev->stats.rx_length_errors++;
 		} else {
 			/* The packet length: up to 4.5K!. */
 			int pkt_len = rx_status & 0x1fff;
@@ -2446,7 +2445,7 @@ static int vortex_rx(struct net_device *dev)
 			skb->protocol = eth_type_trans(skb, dev);
 			netif_rx(skb);
 			dev->last_rx = jiffies;
-			vp->stats.rx_packets++;
+			dev->stats.rx_packets++;
 			/* Wait a limited time to go to next packet. */
 			for (i = 200; i >= 0; i--)
 				if ( ! (ioread16(ioaddr + EL3_STATUS) & CmdInProgress))
@@ -2455,7 +2454,7 @@ static int vortex_rx(struct net_device *dev)
 		} else if (vortex_debug > 0)
 			printk(KERN_NOTICE "%s: No memory to allocate a sk_buff of "
 				   "size %d.\n", dev->name, pkt_len);
-			vp->stats.rx_dropped++;
+			dev->stats.rx_dropped++;
 		}
 		issue_and_wait(dev, RxDiscard);
 	}
@@ -2482,12 +2481,12 @@ boomerang_rx(struct net_device *dev)
 			unsigned char rx_error = rx_status >> 16;
 			if (vortex_debug > 2)
 				printk(KERN_DEBUG " Rx error: status %2.2x.\n", rx_error);
-			vp->stats.rx_errors++;
-			if (rx_error & 0x01)  vp->stats.rx_over_errors++;
-			if (rx_error & 0x02)  vp->stats.rx_length_errors++;
-			if (rx_error & 0x04)  vp->stats.rx_frame_errors++;
-			if (rx_error & 0x08)  vp->stats.rx_crc_errors++;
-			if (rx_error & 0x10)  vp->stats.rx_length_errors++;
+			dev->stats.rx_errors++;
+			if (rx_error & 0x01)  dev->stats.rx_over_errors++;
+			if (rx_error & 0x02)  dev->stats.rx_length_errors++;
+			if (rx_error & 0x04)  dev->stats.rx_frame_errors++;
+			if (rx_error & 0x08)  dev->stats.rx_crc_errors++;
+			if (rx_error & 0x10)  dev->stats.rx_length_errors++;
 		} else {
 			/* The packet length: up to 4.5K!. */
 			int pkt_len = rx_status & 0x1fff;
@@ -2529,7 +2528,7 @@ boomerang_rx(struct net_device *dev)
 			}
 			netif_rx(skb);
 			dev->last_rx = jiffies;
-			vp->stats.rx_packets++;
+			dev->stats.rx_packets++;
 		}
 		entry = (++vp->cur_rx) % RX_RING_SIZE;
 	}
@@ -2591,7 +2590,7 @@ vortex_down(struct net_device *dev, int final_down)
 	del_timer_sync(&vp->rx_oom_timer);
 	del_timer_sync(&vp->timer);

-	/* Turn off statistics ASAP.  We update vp->stats below. */
+	/* Turn off statistics ASAP.  We update dev->stats below. */
 	iowrite16(StatsDisable, ioaddr + EL3_CMD);

 	/* Disable the receiver and transmitter. */
@@ -2728,7 +2727,7 @@ static struct net_device_stats *vortex_get_stats(struct net_device *dev)
 		update_stats(ioaddr, dev);
 		spin_unlock_irqrestore (&vp->lock, flags);
 	}
-	return &vp->stats;
+	return &dev->stats;
 }

 /* Update statistics.
@@ -2748,18 +2747,18 @@ static void update_stats(void __iomem *ioaddr, struct net_device *dev)
 	/* Unlike the 3c5x9 we need not turn off stats updates while reading. */
 	/* Switch to the stats window, and read everything. */
 	EL3WINDOW(6);
-	vp->stats.tx_carrier_errors	+= ioread8(ioaddr + 0);
-	vp->stats.tx_heartbeat_errors	+= ioread8(ioaddr + 1);
-	vp->stats.tx_window_errors	+= ioread8(ioaddr + 4);
-	vp->stats.rx_fifo_errors	+= ioread8(ioaddr + 5);
-	vp->stats.tx_packets		+= ioread8(ioaddr + 6);
-	vp->stats.tx_packets		+= (ioread8(ioaddr + 9)&0x30) << 4;
+	dev->stats.tx_carrier_errors	+= ioread8(ioaddr + 0);
+	dev->stats.tx_heartbeat_errors	+= ioread8(ioaddr + 1);
+	dev->stats.tx_window_errors	+= ioread8(ioaddr + 4);
+	dev->stats.rx_fifo_errors	+= ioread8(ioaddr + 5);
+	dev->stats.tx_packets		+= ioread8(ioaddr + 6);
+	dev->stats.tx_packets		+= (ioread8(ioaddr + 9)&0x30) << 4;
 	/* Rx packets	*/ ioread8(ioaddr + 7);   /* Must read to clear */
 	/* Don't bother with register 9, an extension of registers 6&7.
 	   If we do use the 6&7 values the atomic update assumption above
 	   is invalid. */
-	vp->stats.rx_bytes		+= ioread16(ioaddr + 10);
-	vp->stats.tx_bytes		+= ioread16(ioaddr + 12);
+	dev->stats.rx_bytes		+= ioread16(ioaddr + 10);
+	dev->stats.tx_bytes		+= ioread16(ioaddr + 12);
 	/* Extra stats for get_ethtool_stats() */
 	vp->xstats.tx_multiple_collisions	+= ioread8(ioaddr + 2);
 	vp->xstats.tx_single_collisions		+= ioread8(ioaddr + 3);
@@ -2767,14 +2766,14 @@ static void update_stats(void __iomem *ioaddr, struct net_device *dev)
 	EL3WINDOW(4);
 	vp->xstats.rx_bad_ssd			+= ioread8(ioaddr + 12);

-	vp->stats.collisions = vp->xstats.tx_multiple_collisions
+	dev->stats.collisions = vp->xstats.tx_multiple_collisions
 		+ vp->xstats.tx_single_collisions
 		+ vp->xstats.tx_max_collisions;

 	{
 		u8 up = ioread8(ioaddr + 13);
-		vp->stats.rx_bytes += (up & 0x0f) << 16;
-		vp->stats.tx_bytes += (up & 0xf0) << 12;
+		dev->stats.rx_bytes += (up & 0x0f) << 16;
+		dev->stats.tx_bytes += (up & 0xf0) << 12;
 	}

 	EL3WINDOW(old_window >> 13);
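
Every 3c59x hunk above makes the same mechanical substitution: the driver-private copy of struct net_device_stats is deleted and the counters move into the stats member embedded in struct net_device itself. A minimal sketch of the resulting pattern — the foo_* names are illustrative, not from this driver:

	#include <linux/netdevice.h>

	/* Count events directly in the counters struct net_device provides. */
	static void foo_count_rx(struct net_device *dev, bool ok)
	{
		if (ok)
			dev->stats.rx_packets++;
		else
			dev->stats.rx_errors++;
	}

	/* The get_stats hook then shrinks to a trivial accessor. */
	static struct net_device_stats *foo_get_stats(struct net_device *dev)
	{
		return &dev->stats;
	}

This drops one struct net_device_stats per device and keeps all generic counters in one canonical place.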
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index af46341827f2..9f6cc8a56073 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -1273,20 +1273,6 @@ config PCNET32
 	  To compile this driver as a module, choose M here. The module
 	  will be called pcnet32.

-config PCNET32_NAPI
-	bool "Use RX polling (NAPI)"
-	depends on PCNET32
-	help
-	  NAPI is a new driver API designed to reduce CPU and interrupt load
-	  when the driver is receiving lots of packets from the card. It is
-	  still somewhat experimental and thus not yet enabled by default.
-
-	  If your estimated Rx load is 10kpps or more, or if the card will be
-	  deployed on potentially unfriendly networks (e.g. in a firewall),
-	  then say Y here.
-
-	  If in doubt, say N.
-
 config AMD8111_ETH
 	tristate "AMD 8111 (new PCI lance) support"
 	depends on NET_PCI && PCI
@@ -2440,7 +2426,7 @@ config CHELSIO_T3

 config EHEA
 	tristate "eHEA Ethernet support"
-	depends on IBMEBUS && INET && SPARSEMEM
+	depends on IBMEBUS && INET && SPARSEMEM && MEMORY_HOTPLUG
 	select INET_LRO
 	---help---
 	  This driver supports the IBM pSeries eHEA ethernet adapter.
diff --git a/drivers/net/appletalk/cops.c b/drivers/net/appletalk/cops.c
index 82e9a5bd0dd2..a0b4c8516073 100644
--- a/drivers/net/appletalk/cops.c
+++ b/drivers/net/appletalk/cops.c
@@ -499,19 +499,13 @@ static void cops_reset(struct net_device *dev, int sleep)
 	{
 		outb(0, ioaddr+DAYNA_RESET);	/* Assert the reset port */
 		inb(ioaddr+DAYNA_RESET);	/* Clear the reset */
-		if(sleep)
-		{
-			long snap=jiffies;
-
-			/* Let card finish initializing, about 1/3 second */
-			while (time_before(jiffies, snap + HZ/3))
-				schedule();
-		}
-		else
-			mdelay(333);
+		if (sleep)
+			msleep(333);
+		else
+			mdelay(333);
 	}
+
 	netif_wake_queue(dev);
-	return;
 }

 static void cops_load (struct net_device *dev)
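
The cops_reset() change swaps an open-coded jiffies poll for the stock delay helpers; which helper is legal depends on calling context. A sketch of the rule, not driver code:

	#include <linux/delay.h>

	/* Wait ~333 ms for the hardware to settle. */
	static void wait_a_third_of_a_second(int may_sleep)
	{
		if (may_sleep)
			msleep(333);	/* process context: sleeps, frees the CPU */
		else
			mdelay(333);	/* atomic context: busy-waits, last resort */
	}

msleep() hands the CPU back to the scheduler, so it is preferred whenever sleeping is allowed; the old loop calling schedule() in a tight while was a less efficient hand-rolled version of the same thing.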
diff --git a/drivers/net/atlx/atl1.c b/drivers/net/atlx/atl1.c
index 0afe522b8f7b..9c2394d49428 100644
--- a/drivers/net/atlx/atl1.c
+++ b/drivers/net/atlx/atl1.c
@@ -1,7 +1,7 @@
 /*
  * Copyright(c) 2005 - 2006 Attansic Corporation. All rights reserved.
  * Copyright(c) 2006 - 2007 Chris Snook <csnook@redhat.com>
- * Copyright(c) 2006 Jay Cliburn <jcliburn@gmail.com>
+ * Copyright(c) 2006 - 2008 Jay Cliburn <jcliburn@gmail.com>
  *
  * Derived from Intel e1000 driver
  * Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.
@@ -36,7 +36,6 @@
  * A very incomplete list of things that need to be dealt with:
  *
  * TODO:
- * Wake on LAN.
  * Add more ethtool functions.
  * Fix abstruse irq enable/disable condition described here:
  *	http://marc.theaimsgroup.com/?l=linux-netdev&m=116398508500553&w=2
@@ -638,21 +637,18 @@ static s32 atl1_phy_leave_power_saving(struct atl1_hw *hw)
 }

 /*
- *TODO: do something or get rid of this
+ * Force the PHY into power saving mode using vendor magic.
  */
 #ifdef CONFIG_PM
-static s32 atl1_phy_enter_power_saving(struct atl1_hw *hw)
+static void atl1_phy_enter_power_saving(struct atl1_hw *hw)
 {
-/*	s32 ret_val;
- *	u16 phy_data;
- */
+	atl1_write_phy_reg(hw, MII_DBG_ADDR, 0);
+	atl1_write_phy_reg(hw, MII_DBG_DATA, 0x124E);
+	atl1_write_phy_reg(hw, MII_DBG_ADDR, 2);
+	atl1_write_phy_reg(hw, MII_DBG_DATA, 0x3000);
+	atl1_write_phy_reg(hw, MII_DBG_ADDR, 3);
+	atl1_write_phy_reg(hw, MII_DBG_DATA, 0);

-/*
-	ret_val = atl1_write_phy_reg(hw, ...);
-	ret_val = atl1_write_phy_reg(hw, ...);
-	....
-*/
-	return 0;
 }
 #endif

@@ -2784,64 +2780,93 @@ static int atl1_suspend(struct pci_dev *pdev, pm_message_t state)
 	struct atl1_hw *hw = &adapter->hw;
 	u32 ctrl = 0;
 	u32 wufc = adapter->wol;
+	u32 val;
+	int retval;
+	u16 speed;
+	u16 duplex;

 	netif_device_detach(netdev);
 	if (netif_running(netdev))
 		atl1_down(adapter);

+	retval = pci_save_state(pdev);
+	if (retval)
+		return retval;
+
 	atl1_read_phy_reg(hw, MII_BMSR, (u16 *) & ctrl);
 	atl1_read_phy_reg(hw, MII_BMSR, (u16 *) & ctrl);
-	if (ctrl & BMSR_LSTATUS)
+	val = ctrl & BMSR_LSTATUS;
+	if (val)
 		wufc &= ~ATLX_WUFC_LNKC;

-	/* reduce speed to 10/100M */
-	if (wufc) {
-		atl1_phy_enter_power_saving(hw);
-		/* if resume, let driver to re- setup link */
-		hw->phy_configured = false;
-		atl1_set_mac_addr(hw);
-		atlx_set_multi(netdev);
+	if (val && wufc) {
+		val = atl1_get_speed_and_duplex(hw, &speed, &duplex);
+		if (val) {
+			if (netif_msg_ifdown(adapter))
+				dev_printk(KERN_DEBUG, &pdev->dev,
+					"error getting speed/duplex\n");
+			goto disable_wol;
+		}

 		ctrl = 0;
-		/* turn on magic packet wol */
-		if (wufc & ATLX_WUFC_MAG)
-			ctrl = WOL_MAGIC_EN | WOL_MAGIC_PME_EN;

-		/* turn on Link change WOL */
-		if (wufc & ATLX_WUFC_LNKC)
-			ctrl |= (WOL_LINK_CHG_EN | WOL_LINK_CHG_PME_EN);
+		/* enable magic packet WOL */
+		if (wufc & ATLX_WUFC_MAG)
+			ctrl |= (WOL_MAGIC_EN | WOL_MAGIC_PME_EN);
 		iowrite32(ctrl, hw->hw_addr + REG_WOL_CTRL);
-
-		/* turn on all-multi mode if wake on multicast is enabled */
-		ctrl = ioread32(hw->hw_addr + REG_MAC_CTRL);
-		ctrl &= ~MAC_CTRL_DBG;
-		ctrl &= ~MAC_CTRL_PROMIS_EN;
-		if (wufc & ATLX_WUFC_MC)
-			ctrl |= MAC_CTRL_MC_ALL_EN;
-		else
-			ctrl &= ~MAC_CTRL_MC_ALL_EN;
-
-		/* turn on broadcast mode if wake on-BC is enabled */
-		if (wufc & ATLX_WUFC_BC)
+		ioread32(hw->hw_addr + REG_WOL_CTRL);
+
+		/* configure the mac */
+		ctrl = MAC_CTRL_RX_EN;
+		ctrl |= ((u32)((speed == SPEED_1000) ? MAC_CTRL_SPEED_1000 :
+			MAC_CTRL_SPEED_10_100) << MAC_CTRL_SPEED_SHIFT);
+		if (duplex == FULL_DUPLEX)
+			ctrl |= MAC_CTRL_DUPLX;
+		ctrl |= (((u32)adapter->hw.preamble_len &
+			MAC_CTRL_PRMLEN_MASK) << MAC_CTRL_PRMLEN_SHIFT);
+		if (adapter->vlgrp)
+			ctrl |= MAC_CTRL_RMV_VLAN;
+		if (wufc & ATLX_WUFC_MAG)
 			ctrl |= MAC_CTRL_BC_EN;
-		else
-			ctrl &= ~MAC_CTRL_BC_EN;
-
-		/* enable RX */
-		ctrl |= MAC_CTRL_RX_EN;
 		iowrite32(ctrl, hw->hw_addr + REG_MAC_CTRL);
-		pci_enable_wake(pdev, PCI_D3hot, 1);
-		pci_enable_wake(pdev, PCI_D3cold, 1);
-	} else {
-		iowrite32(0, hw->hw_addr + REG_WOL_CTRL);
-		pci_enable_wake(pdev, PCI_D3hot, 0);
-		pci_enable_wake(pdev, PCI_D3cold, 0);
+		ioread32(hw->hw_addr + REG_MAC_CTRL);
+
+		/* poke the PHY */
+		ctrl = ioread32(hw->hw_addr + REG_PCIE_PHYMISC);
+		ctrl |= PCIE_PHYMISC_FORCE_RCV_DET;
+		iowrite32(ctrl, hw->hw_addr + REG_PCIE_PHYMISC);
+		ioread32(hw->hw_addr + REG_PCIE_PHYMISC);
+
+		pci_enable_wake(pdev, pci_choose_state(pdev, state), 1);
+		goto exit;
 	}

-	pci_save_state(pdev);
+	if (!val && wufc) {
+		ctrl |= (WOL_LINK_CHG_EN | WOL_LINK_CHG_PME_EN);
+		iowrite32(ctrl, hw->hw_addr + REG_WOL_CTRL);
+		ioread32(hw->hw_addr + REG_WOL_CTRL);
+		iowrite32(0, hw->hw_addr + REG_MAC_CTRL);
+		ioread32(hw->hw_addr + REG_MAC_CTRL);
+		hw->phy_configured = false;
+		pci_enable_wake(pdev, pci_choose_state(pdev, state), 1);
+		goto exit;
+	}
+
+disable_wol:
+	iowrite32(0, hw->hw_addr + REG_WOL_CTRL);
+	ioread32(hw->hw_addr + REG_WOL_CTRL);
+	ctrl = ioread32(hw->hw_addr + REG_PCIE_PHYMISC);
+	ctrl |= PCIE_PHYMISC_FORCE_RCV_DET;
+	iowrite32(ctrl, hw->hw_addr + REG_PCIE_PHYMISC);
+	ioread32(hw->hw_addr + REG_PCIE_PHYMISC);
+	atl1_phy_enter_power_saving(hw);
+	hw->phy_configured = false;
+	pci_enable_wake(pdev, pci_choose_state(pdev, state), 0);
+exit:
+	if (netif_running(netdev))
+		pci_disable_msi(adapter->pdev);
 	pci_disable_device(pdev);
-
-	pci_set_power_state(pdev, PCI_D3hot);
+	pci_set_power_state(pdev, pci_choose_state(pdev, state));

 	return 0;
 }
@@ -2855,20 +2880,26 @@ static int atl1_resume(struct pci_dev *pdev)
 	pci_set_power_state(pdev, PCI_D0);
 	pci_restore_state(pdev);

-	/* FIXME: check and handle */
 	err = pci_enable_device(pdev);
+	if (err) {
+		if (netif_msg_ifup(adapter))
+			dev_printk(KERN_DEBUG, &pdev->dev,
+				"error enabling pci device\n");
+		return err;
+	}
+
+	pci_set_master(pdev);
+	iowrite32(0, adapter->hw.hw_addr + REG_WOL_CTRL);
 	pci_enable_wake(pdev, PCI_D3hot, 0);
 	pci_enable_wake(pdev, PCI_D3cold, 0);

-	iowrite32(0, adapter->hw.hw_addr + REG_WOL_CTRL);
-	atl1_reset(adapter);
+	atl1_reset_hw(&adapter->hw);
+	adapter->cmb.cmb->int_stats = 0;

 	if (netif_running(netdev))
 		atl1_up(adapter);
 	netif_device_attach(netdev);

-	atl1_via_workaround(adapter);
-
 	return 0;
 }
@@ -2876,6 +2907,13 @@ static int atl1_resume(struct pci_dev *pdev)
 #define atl1_resume NULL
 #endif

+static void atl1_shutdown(struct pci_dev *pdev)
+{
+#ifdef CONFIG_PM
+	atl1_suspend(pdev, PMSG_SUSPEND);
+#endif
+}
+
 #ifdef CONFIG_NET_POLL_CONTROLLER
 static void atl1_poll_controller(struct net_device *netdev)
 {
@@ -3122,7 +3160,8 @@ static struct pci_driver atl1_driver = {
 	.probe = atl1_probe,
 	.remove = __devexit_p(atl1_remove),
 	.suspend = atl1_suspend,
-	.resume = atl1_resume
+	.resume = atl1_resume,
+	.shutdown = atl1_shutdown
 };

 /*
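
The reworked atl1_suspend() follows the canonical PCI suspend ordering: save config space first, program the wake-up sources, then cut power using the state the PCI core recommends instead of a hard-coded PCI_D3hot. Stripped to its skeleton — a sketch under the assumption that the device-specific WOL programming happens in the middle, with a hypothetical foo_suspend():

	#include <linux/pci.h>

	static int foo_suspend(struct pci_dev *pdev, pm_message_t state)
	{
		int err, wake_enabled = 1;	/* assume WOL was armed below */

		err = pci_save_state(pdev);	/* must precede power-down */
		if (err)
			return err;

		/* ... device-specific WOL/MAC register programming ... */

		pci_enable_wake(pdev, pci_choose_state(pdev, state), wake_enabled);
		pci_disable_device(pdev);
		pci_set_power_state(pdev, pci_choose_state(pdev, state));
		return 0;
	}

pci_choose_state() lets the platform pick the deepest sleep state it can actually wake from, which is why the patch threads the pm_message_t through instead of assuming D3hot everywhere.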
diff --git a/drivers/net/atlx/atl1.h b/drivers/net/atlx/atl1.h
index 51893d66eae1..a5015b14a429 100644
--- a/drivers/net/atlx/atl1.h
+++ b/drivers/net/atlx/atl1.h
@@ -1,7 +1,7 @@
 /*
  * Copyright(c) 2005 - 2006 Attansic Corporation. All rights reserved.
  * Copyright(c) 2006 - 2007 Chris Snook <csnook@redhat.com>
- * Copyright(c) 2006 Jay Cliburn <jcliburn@gmail.com>
+ * Copyright(c) 2006 - 2008 Jay Cliburn <jcliburn@gmail.com>
  *
  * Derived from Intel e1000 driver
  * Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.
diff --git a/drivers/net/atlx/atlx.c b/drivers/net/atlx/atlx.c
index f06b854e2501..b3e7fcf0f6e7 100644
--- a/drivers/net/atlx/atlx.c
+++ b/drivers/net/atlx/atlx.c
@@ -2,7 +2,7 @@
  *
  * Copyright(c) 2005 - 2006 Attansic Corporation. All rights reserved.
  * Copyright(c) 2006 - 2007 Chris Snook <csnook@redhat.com>
- * Copyright(c) 2006 Jay Cliburn <jcliburn@gmail.com>
+ * Copyright(c) 2006 - 2008 Jay Cliburn <jcliburn@gmail.com>
  * Copyright(c) 2007 Atheros Corporation. All rights reserved.
  *
  * Derived from Intel e1000 driver
diff --git a/drivers/net/atlx/atlx.h b/drivers/net/atlx/atlx.h
index 3be7c09734d4..297a03da6b7f 100644
--- a/drivers/net/atlx/atlx.h
+++ b/drivers/net/atlx/atlx.h
@@ -2,7 +2,7 @@
  *
  * Copyright(c) 2005 - 2006 Attansic Corporation. All rights reserved.
  * Copyright(c) 2006 - 2007 Chris Snook <csnook@redhat.com>
- * Copyright(c) 2006 Jay Cliburn <jcliburn@gmail.com>
+ * Copyright(c) 2006 - 2008 Jay Cliburn <jcliburn@gmail.com>
  * Copyright(c) 2007 Atheros Corporation. All rights reserved.
  *
  * Derived from Intel e1000 driver
@@ -29,7 +29,7 @@
 #include <linux/module.h>
 #include <linux/types.h>

-#define ATLX_DRIVER_VERSION "2.1.1"
+#define ATLX_DRIVER_VERSION "2.1.3"
 MODULE_AUTHOR("Xiong Huang <xiong.huang@atheros.com>, \
 	Chris Snook <csnook@redhat.com>, Jay Cliburn <jcliburn@gmail.com>");
 MODULE_LICENSE("GPL");
@@ -460,6 +460,9 @@ MODULE_VERSION(ATLX_DRIVER_VERSION);
 #define MII_ATLX_PSSR_100MBS	0x4000	/* 01=100Mbs */
 #define MII_ATLX_PSSR_1000MBS	0x8000	/* 10=1000Mbs */

+#define MII_DBG_ADDR	0x1D
+#define MII_DBG_DATA	0x1E
+
 /* PCI Command Register Bit Definitions */
 #define PCI_REG_COMMAND		0x04	/* PCI Command Register */
 #define CMD_IO_SPACE		0x0001
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 6425603bc379..50a40e433154 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -1425,13 +1425,13 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
 	res = netdev_set_master(slave_dev, bond_dev);
 	if (res) {
 		dprintk("Error %d calling netdev_set_master\n", res);
-		goto err_close;
+		goto err_restore_mac;
 	}
 	/* open the slave since the application closed it */
 	res = dev_open(slave_dev);
 	if (res) {
 		dprintk("Openning slave %s failed\n", slave_dev->name);
-		goto err_restore_mac;
+		goto err_unset_master;
 	}

 	new_slave->dev = slave_dev;
@@ -1444,7 +1444,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
 		 */
 		res = bond_alb_init_slave(bond, new_slave);
 		if (res) {
-			goto err_unset_master;
+			goto err_close;
 		}
 	}

@@ -1619,7 +1619,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)

 	res = bond_create_slave_symlinks(bond_dev, slave_dev);
 	if (res)
-		goto err_unset_master;
+		goto err_close;

 	printk(KERN_INFO DRV_NAME
 	       ": %s: enslaving %s as a%s interface with a%s link.\n",
@@ -1631,12 +1631,12 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
 	return 0;

 /* Undo stages on error */
-err_unset_master:
-	netdev_set_master(slave_dev, NULL);
-
 err_close:
 	dev_close(slave_dev);

+err_unset_master:
+	netdev_set_master(slave_dev, NULL);
+
 err_restore_mac:
 	if (!bond->params.fail_over_mac) {
 		memcpy(addr.sa_data, new_slave->perm_hwaddr, ETH_ALEN);
@@ -4936,7 +4936,9 @@ int bond_create(char *name, struct bond_params *params, struct bonding **newbond
 	if (res < 0) {
 		rtnl_lock();
 		down_write(&bonding_rwsem);
-		goto out_bond;
+		bond_deinit(bond_dev);
+		unregister_netdevice(bond_dev);
+		goto out_rtnl;
 	}

 	return 0;
@@ -4990,9 +4992,10 @@ err:
 		destroy_workqueue(bond->wq);
 	}

+	bond_destroy_sysfs();
+
 	rtnl_lock();
 	bond_free_all();
-	bond_destroy_sysfs();
 	rtnl_unlock();
 out:
 	return res;
@@ -5004,9 +5007,10 @@ static void __exit bonding_exit(void)
 	unregister_netdevice_notifier(&bond_netdev_notifier);
 	unregister_inetaddr_notifier(&bond_inetaddr_notifier);

+	bond_destroy_sysfs();
+
 	rtnl_lock();
 	bond_free_all();
-	bond_destroy_sysfs();
 	rtnl_unlock();
 }

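
The relabelled error paths in bond_enslave() restore the kernel's usual unwind discipline: each failure jumps to the label that undoes only what has already succeeded, in reverse order of setup. Before the fix, a failed netdev_set_master() jumped to err_close and closed a slave that had never been opened. The shape of the idiom, with hypothetical step_*/undo_* helpers standing in for netdev_set_master(), dev_open() and friends:

	/* Placeholders for the real setup/teardown calls. */
	static int step_a(void), step_b(void), step_c(void);
	static void undo_step_a(void), undo_step_b(void);

	static int setup_chain(void)
	{
		int err;

		err = step_a();
		if (err)
			goto out;
		err = step_b();
		if (err)
			goto undo_a;	/* only step_a needs undoing */
		err = step_c();
		if (err)
			goto undo_b;	/* unwind b, then fall into a */
		return 0;

	undo_b:
		undo_step_b();
	undo_a:
		undo_step_a();
	out:
		return err;
	}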
diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c
index 979c2d05ff9c..08f3d396bcd6 100644
--- a/drivers/net/bonding/bond_sysfs.c
+++ b/drivers/net/bonding/bond_sysfs.c
@@ -146,29 +146,29 @@ static ssize_t bonding_store_bonds(struct class *cls, const char *buffer, size_t
 				       ": Unable remove bond %s due to open references.\n",
 				       ifname);
 				res = -EPERM;
-				goto out;
+				goto out_unlock;
 			}
 			printk(KERN_INFO DRV_NAME
 			       ": %s is being deleted...\n",
 			       bond->dev->name);
 			bond_destroy(bond);
-			up_write(&bonding_rwsem);
-			rtnl_unlock();
-			goto out;
+			goto out_unlock;
 		}

 		printk(KERN_ERR DRV_NAME
 		       ": unable to delete non-existent bond %s\n", ifname);
 		res = -ENODEV;
-		up_write(&bonding_rwsem);
-		rtnl_unlock();
-		goto out;
+		goto out_unlock;
 	}

 err_no_cmd:
 	printk(KERN_ERR DRV_NAME
 	       ": no command found in bonding_masters. Use +ifname or -ifname.\n");
-	res = -EPERM;
+	return -EPERM;
+
+out_unlock:
+	up_write(&bonding_rwsem);
+	rtnl_unlock();

 	/* Always return either count or an error.  If you return 0, you'll
 	 * get called forever, which is bad.
@@ -1437,8 +1437,16 @@ int bond_create_sysfs(void)
 	 * configure multiple bonding devices.
 	 */
 	if (ret == -EEXIST) {
-		netdev_class = NULL;
-		return 0;
+		/* Is someone being kinky and naming a device bonding_master? */
+		if (__dev_get_by_name(&init_net,
+				      class_attr_bonding_masters.attr.name))
+			printk(KERN_ERR
+			       "network device named %s already exists in sysfs",
+			       class_attr_bonding_masters.attr.name);
+		else {
+			netdev_class = NULL;
+			return 0;
+		}
 	}

 	return ret;
diff --git a/drivers/net/cxgb3/adapter.h b/drivers/net/cxgb3/adapter.h
index 4fdb13f8447b..acebe431d068 100644
--- a/drivers/net/cxgb3/adapter.h
+++ b/drivers/net/cxgb3/adapter.h
@@ -71,6 +71,7 @@ enum {				/* adapter flags */
 	USING_MSIX = (1 << 2),
 	QUEUES_BOUND = (1 << 3),
 	TP_PARITY_INIT = (1 << 4),
+	NAPI_INIT = (1 << 5),
 };

 struct fl_pg_chunk {
diff --git a/drivers/net/cxgb3/common.h b/drivers/net/cxgb3/common.h
index 91ee7277b813..579bee42a5cb 100644
--- a/drivers/net/cxgb3/common.h
+++ b/drivers/net/cxgb3/common.h
@@ -698,6 +698,7 @@ void mac_prep(struct cmac *mac, struct adapter *adapter, int index);
 void early_hw_init(struct adapter *adapter, const struct adapter_info *ai);
 int t3_prep_adapter(struct adapter *adapter, const struct adapter_info *ai,
 		    int reset);
+int t3_replay_prep_adapter(struct adapter *adapter);
 void t3_led_ready(struct adapter *adapter);
 void t3_fatal_err(struct adapter *adapter);
 void t3_set_vlan_accel(struct adapter *adapter, unsigned int ports, int on);
diff --git a/drivers/net/cxgb3/cxgb3_main.c b/drivers/net/cxgb3/cxgb3_main.c
index 05e5f59e87fa..3a3127216791 100644
--- a/drivers/net/cxgb3/cxgb3_main.c
+++ b/drivers/net/cxgb3/cxgb3_main.c
@@ -421,6 +421,13 @@ static void init_napi(struct adapter *adap)
 			netif_napi_add(qs->netdev, &qs->napi, qs->napi.poll,
 				       64);
 	}
+
+	/*
+	 * netif_napi_add() can be called only once per napi_struct because it
+	 * adds each new napi_struct to a list.  Be careful not to call it a
+	 * second time, e.g., during EEH recovery, by making a note of it.
+	 */
+	adap->flags |= NAPI_INIT;
 }

 /*
@@ -896,7 +903,8 @@ static int cxgb_up(struct adapter *adap)
 			goto out;

 		setup_rss(adap);
-		init_napi(adap);
+		if (!(adap->flags & NAPI_INIT))
+			init_napi(adap);
 		adap->flags |= FULL_INIT_DONE;
 	}

@@ -999,7 +1007,7 @@ static int offload_open(struct net_device *dev)
 		return 0;

 	if (!adap_up && (err = cxgb_up(adapter)) < 0)
-		return err;
+		goto out;

 	t3_tp_set_offload_mode(adapter, 1);
 	tdev->lldev = adapter->port[0];
@@ -1061,10 +1069,8 @@ static int cxgb_open(struct net_device *dev)
 	int other_ports = adapter->open_device_map & PORT_MASK;
 	int err;

-	if (!adapter->open_device_map && (err = cxgb_up(adapter)) < 0) {
-		quiesce_rx(adapter);
+	if (!adapter->open_device_map && (err = cxgb_up(adapter)) < 0)
 		return err;
-	}

 	set_bit(pi->port_id, &adapter->open_device_map);
 	if (is_offload(adapter) && !ofld_disable) {
@@ -1894,11 +1900,11 @@ static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
 		u8 *fw_data;
 		struct ch_mem_range t;

-		if (!capable(CAP_NET_ADMIN))
+		if (!capable(CAP_SYS_RAWIO))
 			return -EPERM;
 		if (copy_from_user(&t, useraddr, sizeof(t)))
 			return -EFAULT;
-
+		/* Check t.len sanity ? */
 		fw_data = kmalloc(t.len, GFP_KERNEL);
 		if (!fw_data)
 			return -ENOMEM;
@@ -2424,14 +2430,11 @@ static pci_ers_result_t t3_io_error_detected(struct pci_dev *pdev,
 	    test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
 		offload_close(&adapter->tdev);

-	/* Free sge resources */
-	t3_free_sge_resources(adapter);
-
 	adapter->flags &= ~FULL_INIT_DONE;

 	pci_disable_device(pdev);

-	/* Request a slot slot reset. */
+	/* Request a slot reset. */
 	return PCI_ERS_RESULT_NEED_RESET;
 }

@@ -2448,13 +2451,20 @@ static pci_ers_result_t t3_io_slot_reset(struct pci_dev *pdev)
 	if (pci_enable_device(pdev)) {
 		dev_err(&pdev->dev,
 			"Cannot re-enable PCI device after reset.\n");
-		return PCI_ERS_RESULT_DISCONNECT;
+		goto err;
 	}
 	pci_set_master(pdev);
+	pci_restore_state(pdev);

-	t3_prep_adapter(adapter, adapter->params.info, 1);
+	/* Free sge resources */
+	t3_free_sge_resources(adapter);
+
+	if (t3_replay_prep_adapter(adapter))
+		goto err;

 	return PCI_ERS_RESULT_RECOVERED;
+err:
+	return PCI_ERS_RESULT_DISCONNECT;
 }

 /**
@@ -2483,13 +2493,6 @@ static void t3_io_resume(struct pci_dev *pdev)
 			netif_device_attach(netdev);
 		}
 	}
-
-	if (is_offload(adapter)) {
-		__set_bit(OFFLOAD_DEVMAP_BIT, &adapter->registered_device_map);
-		if (offload_open(adapter->port[0]))
-			printk(KERN_WARNING
-			       "Could not bring back offload capabilities\n");
-	}
 }

 static struct pci_error_handlers t3_err_handler = {
@@ -2608,6 +2611,7 @@ static int __devinit init_one(struct pci_dev *pdev,
 	}

 	pci_set_master(pdev);
+	pci_save_state(pdev);

 	mmio_start = pci_resource_start(pdev, 0);
 	mmio_len = pci_resource_len(pdev, 0);
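
The NAPI_INIT flag exists because netif_napi_add() links each napi_struct into the net device's list, so running it a second time on the same structure — as the EEH recovery path did by re-entering cxgb_up() — would corrupt that list. The guard is the ordinary latch-a-flag-on-first-use pattern, condensed here using this patch's own names:

	/* in cxgb_up(): build the NAPI instances only on the first pass;
	 * init_napi() itself sets adap->flags |= NAPI_INIT at the end. */
	if (!(adap->flags & NAPI_INIT))
		init_napi(adap);

t3_reset_qset() in sge.c below is the other half of the same fix: on teardown it clears everything in the qset except the napi_struct whenever NAPI has already been initialised, so the recovered adapter can reuse it.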
diff --git a/drivers/net/cxgb3/regs.h b/drivers/net/cxgb3/regs.h
index 02dbbb300929..567178879345 100644
--- a/drivers/net/cxgb3/regs.h
+++ b/drivers/net/cxgb3/regs.h
@@ -444,6 +444,14 @@

 #define A_PCIE_CFG 0x88

+#define S_ENABLELINKDWNDRST    21
+#define V_ENABLELINKDWNDRST(x) ((x) << S_ENABLELINKDWNDRST)
+#define F_ENABLELINKDWNDRST    V_ENABLELINKDWNDRST(1U)
+
+#define S_ENABLELINKDOWNRST    20
+#define V_ENABLELINKDOWNRST(x) ((x) << S_ENABLELINKDOWNRST)
+#define F_ENABLELINKDOWNRST    V_ENABLELINKDOWNRST(1U)
+
 #define S_PCIE_CLIDECEN    16
 #define V_PCIE_CLIDECEN(x) ((x) << S_PCIE_CLIDECEN)
 #define F_PCIE_CLIDECEN    V_PCIE_CLIDECEN(1U)
diff --git a/drivers/net/cxgb3/sge.c b/drivers/net/cxgb3/sge.c
index 98a6bbd11d4c..796eb305cdc3 100644
--- a/drivers/net/cxgb3/sge.c
+++ b/drivers/net/cxgb3/sge.c
@@ -539,6 +539,31 @@ static void *alloc_ring(struct pci_dev *pdev, size_t nelem, size_t elem_size,
 }

 /**
+ *	t3_reset_qset - reset a sge qset
+ *	@q: the queue set
+ *
+ *	Reset the qset structure.
+ *	the NAPI structure is preserved in the event of
+ *	the qset's reincarnation, for example during EEH recovery.
+ */
+static void t3_reset_qset(struct sge_qset *q)
+{
+	if (q->adap &&
+	    !(q->adap->flags & NAPI_INIT)) {
+		memset(q, 0, sizeof(*q));
+		return;
+	}
+
+	q->adap = NULL;
+	memset(&q->rspq, 0, sizeof(q->rspq));
+	memset(q->fl, 0, sizeof(struct sge_fl) * SGE_RXQ_PER_SET);
+	memset(q->txq, 0, sizeof(struct sge_txq) * SGE_TXQ_PER_SET);
+	q->txq_stopped = 0;
+	memset(&q->tx_reclaim_timer, 0, sizeof(q->tx_reclaim_timer));
+}
+
+
+/**
  *	free_qset - free the resources of an SGE queue set
  *	@adapter: the adapter owning the queue set
  *	@q: the queue set
@@ -594,7 +619,7 @@ static void t3_free_qset(struct adapter *adapter, struct sge_qset *q)
 				  q->rspq.desc, q->rspq.phys_addr);
 	}

-	memset(q, 0, sizeof(*q));
+	t3_reset_qset(q);
 }

 /**
@@ -1365,7 +1390,7 @@ static void restart_ctrlq(unsigned long data)
  */
 int t3_mgmt_tx(struct adapter *adap, struct sk_buff *skb)
 {
-	int ret;
+	int ret;
 	local_bh_disable();
 	ret = ctrl_xmit(adap, &adap->sge.qs[0].txq[TXQ_CTRL], skb);
 	local_bh_enable();
diff --git a/drivers/net/cxgb3/t3_hw.c b/drivers/net/cxgb3/t3_hw.c
index a99496a431c4..d405a932c73a 100644
--- a/drivers/net/cxgb3/t3_hw.c
+++ b/drivers/net/cxgb3/t3_hw.c
@@ -3264,6 +3264,7 @@ static void config_pcie(struct adapter *adap)

 	t3_write_reg(adap, A_PCIE_PEX_ERR, 0xffffffff);
 	t3_set_reg_field(adap, A_PCIE_CFG, 0,
+			 F_ENABLELINKDWNDRST | F_ENABLELINKDOWNRST |
 			 F_PCIE_DMASTOPEN | F_PCIE_CLIDECEN);
 }

@@ -3655,3 +3656,30 @@ void t3_led_ready(struct adapter *adapter)
 	t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
 			 F_GPIO0_OUT_VAL);
 }
+
+int t3_replay_prep_adapter(struct adapter *adapter)
+{
+	const struct adapter_info *ai = adapter->params.info;
+	unsigned int i, j = 0;
+	int ret;
+
+	early_hw_init(adapter, ai);
+	ret = init_parity(adapter);
+	if (ret)
+		return ret;
+
+	for_each_port(adapter, i) {
+		struct port_info *p = adap2pinfo(adapter, i);
+		while (!adapter->params.vpd.port_type[j])
+			++j;
+
+		p->port_type->phy_prep(&p->phy, adapter, ai->phy_base_addr + j,
+				       ai->mdio_ops);
+
+		p->phy.ops->power_down(&p->phy, 1);
+		++j;
+	}
+
+	return 0;
+}
+
diff --git a/drivers/net/dm9000.c b/drivers/net/dm9000.c
index 12b4626102e1..32a9a922f153 100644
--- a/drivers/net/dm9000.c
+++ b/drivers/net/dm9000.c
@@ -117,6 +117,9 @@ typedef struct board_info {

 	struct mutex	 addr_lock;	/* phy and eeprom access lock */

+	struct delayed_work phy_poll;
+	struct net_device *ndev;
+
 	spinlock_t	lock;

 	struct mii_if_info mii;
@@ -297,6 +300,10 @@ static void dm9000_set_io(struct board_info *db, int byte_width)
 	}
 }

+static void dm9000_schedule_poll(board_info_t *db)
+{
+	schedule_delayed_work(&db->phy_poll, HZ * 2);
+}

 /* Our watchdog timed out. Called by the networking layer */
 static void dm9000_timeout(struct net_device *dev)
@@ -465,6 +472,17 @@ static const struct ethtool_ops dm9000_ethtool_ops = {
 	.set_eeprom		= dm9000_set_eeprom,
 };

+static void
+dm9000_poll_work(struct work_struct *w)
+{
+	struct delayed_work *dw = container_of(w, struct delayed_work, work);
+	board_info_t *db = container_of(dw, board_info_t, phy_poll);
+
+	mii_check_media(&db->mii, netif_msg_link(db), 0);
+
+	if (netif_running(db->ndev))
+		dm9000_schedule_poll(db);
+}

 /* dm9000_release_board
  *
@@ -503,7 +521,7 @@ dm9000_release_board(struct platform_device *pdev, struct board_info *db)
 /*
  *  Search DM9000 board, allocate space and register it
  */
-static int
+static int __devinit
 dm9000_probe(struct platform_device *pdev)
 {
 	struct dm9000_plat_data *pdata = pdev->dev.platform_data;
@@ -525,17 +543,21 @@ dm9000_probe(struct platform_device *pdev)

 	SET_NETDEV_DEV(ndev, &pdev->dev);

-	dev_dbg(&pdev->dev, "dm9000_probe()");
+	dev_dbg(&pdev->dev, "dm9000_probe()\n");

 	/* setup board info structure */
 	db = (struct board_info *) ndev->priv;
 	memset(db, 0, sizeof (*db));

 	db->dev = &pdev->dev;
+	db->ndev = ndev;

 	spin_lock_init(&db->lock);
 	mutex_init(&db->addr_lock);

+	INIT_DELAYED_WORK(&db->phy_poll, dm9000_poll_work);
+
+
 	if (pdev->num_resources < 2) {
 		ret = -ENODEV;
 		goto out;
@@ -761,6 +783,8 @@ dm9000_open(struct net_device *dev)

 	mii_check_media(&db->mii, netif_msg_link(db), 1);
 	netif_start_queue(dev);
+
+	dm9000_schedule_poll(db);

 	return 0;
 }
@@ -879,6 +903,8 @@ dm9000_stop(struct net_device *ndev)
 	if (netif_msg_ifdown(db))
 		dev_dbg(db->dev, "shutting down %s\n", ndev->name);

+	cancel_delayed_work(&db->phy_poll);
+
 	netif_stop_queue(ndev);
 	netif_carrier_off(ndev);

@@ -1288,6 +1314,8 @@ dm9000_phy_read(struct net_device *dev, int phy_reg_unused, int reg)
 	spin_unlock_irqrestore(&db->lock,flags);

 	mutex_unlock(&db->addr_lock);
+
+	dm9000_dbg(db, 5, "phy_read[%02x] -> %04x\n", reg, ret);
 	return ret;
 }

@@ -1301,6 +1329,7 @@ dm9000_phy_write(struct net_device *dev, int phyaddr_unused, int reg, int value)
 	unsigned long flags;
 	unsigned long reg_save;

+	dm9000_dbg(db, 5, "phy_write[%02x] = %04x\n", reg, value);
 	mutex_lock(&db->addr_lock);

 	spin_lock_irqsave(&db->lock,flags);
@@ -1372,7 +1401,7 @@ dm9000_drv_resume(struct platform_device *dev)
 	return 0;
 }

-static int
+static int __devexit
 dm9000_drv_remove(struct platform_device *pdev)
 {
 	struct net_device *ndev = platform_get_drvdata(pdev);
@@ -1393,7 +1422,7 @@ static struct platform_driver dm9000_driver = {
 		.owner	 = THIS_MODULE,
 	},
 	.probe   = dm9000_probe,
-	.remove  = dm9000_drv_remove,
+	.remove  = __devexit_p(dm9000_drv_remove),
 	.suspend = dm9000_drv_suspend,
 	.resume  = dm9000_drv_resume,
 };
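
The dm9000 additions form a textbook self-rearming delayed work: initialise in probe, schedule in open, re-schedule from the handler while the interface is up, cancel in stop. Reduced to a sketch — the foo_* names mirror the patch but the surrounding driver is assumed:

	#include <linux/workqueue.h>
	#include <linux/netdevice.h>

	struct foo_priv {
		struct net_device *ndev;
		struct delayed_work phy_poll;
	};

	static void foo_poll_work(struct work_struct *w)
	{
		struct delayed_work *dw = container_of(w, struct delayed_work, work);
		struct foo_priv *priv = container_of(dw, struct foo_priv, phy_poll);

		/* check link state here, then re-arm while the device is up */
		if (netif_running(priv->ndev))
			schedule_delayed_work(&priv->phy_poll, 2 * HZ);
	}

	/* probe(): INIT_DELAYED_WORK(&priv->phy_poll, foo_poll_work);
	 * open():  schedule_delayed_work(&priv->phy_poll, 2 * HZ);
	 * stop():  cancel_delayed_work(&priv->phy_poll); */

cancel_delayed_work() in dm9000_stop() pairs with the netif_running() test in the handler, so a poll already queued when the device goes down simply declines to re-arm itself.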
diff --git a/drivers/net/e1000e/defines.h b/drivers/net/e1000e/defines.h
index 2a53875cddbf..f823b8ba5785 100644
--- a/drivers/net/e1000e/defines.h
+++ b/drivers/net/e1000e/defines.h
@@ -648,6 +648,8 @@
 #define IFE_E_PHY_ID         0x02A80330
 #define IFE_PLUS_E_PHY_ID    0x02A80320
 #define IFE_C_E_PHY_ID       0x02A80310
+#define BME1000_E_PHY_ID     0x01410CB0
+#define BME1000_E_PHY_ID_R2  0x01410CB1

 /* M88E1000 Specific Registers */
 #define M88E1000_PHY_SPEC_CTRL       0x10 /* PHY Specific Control Register */
@@ -701,6 +703,14 @@
 #define M88EC018_EPSCR_DOWNSHIFT_COUNTER_MASK  0x0E00
 #define M88EC018_EPSCR_DOWNSHIFT_COUNTER_5X    0x0800

+/* BME1000 PHY Specific Control Register */
+#define BME1000_PSCR_ENABLE_DOWNSHIFT   0x0800 /* 1 = enable downshift */
+
+
+#define PHY_PAGE_SHIFT 5
+#define PHY_REG(page, reg) (((page) << PHY_PAGE_SHIFT) | \
+                           ((reg) & MAX_PHY_REG_ADDRESS))
+
 /*
  * Bits...
  * 15-5: page
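
PHY_REG() packs a PHY page number and a register index into the single offset the rest of the driver passes around: the register keeps the low 5 bits, the page sits above them — exactly the "15-5: page" layout the comment that follows describes. Worked through with concrete numbers (MAX_PHY_REG_ADDRESS is assumed to be the existing 0x1F mask for the 5-bit register field):

	#define MAX_PHY_REG_ADDRESS 0x1F                /* 5-bit register field */
	#define PHY_PAGE_SHIFT      5
	#define PHY_REG(page, reg)  (((page) << PHY_PAGE_SHIFT) | \
	                             ((reg) & MAX_PHY_REG_ADDRESS))

	/* PHY_REG(2, 21) == (2 << 5) | 21 == 0x55: page 2, register 21 */

Accessors such as the new e1000e_read_phy_reg_bm()/e1000e_write_phy_reg_bm() can then recover the page as offset >> PHY_PAGE_SHIFT and the register as offset & MAX_PHY_REG_ADDRESS.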
diff --git a/drivers/net/e1000e/e1000.h b/drivers/net/e1000e/e1000.h
index 38bfd0d261fe..d3bc6f8101fa 100644
--- a/drivers/net/e1000e/e1000.h
+++ b/drivers/net/e1000e/e1000.h
@@ -127,7 +127,7 @@ struct e1000_buffer {
 		/* arrays of page information for packet split */
 		struct e1000_ps_page *ps_pages;
 	};
-
+	struct page *page;
 };

 struct e1000_ring {
@@ -304,6 +304,7 @@ struct e1000_info {
 #define FLAG_HAS_CTRLEXT_ON_LOAD          (1 << 5)
 #define FLAG_HAS_SWSM_ON_LOAD             (1 << 6)
 #define FLAG_HAS_JUMBO_FRAMES             (1 << 7)
+#define FLAG_IS_ICH                       (1 << 9)
 #define FLAG_HAS_SMART_POWER_DOWN         (1 << 11)
 #define FLAG_IS_QUAD_PORT_A               (1 << 12)
 #define FLAG_IS_QUAD_PORT                 (1 << 13)
@@ -386,6 +387,7 @@ extern void e1000e_set_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw,
 						 bool state);
 extern void e1000e_igp3_phy_powerdown_workaround_ich8lan(struct e1000_hw *hw);
 extern void e1000e_gig_downshift_workaround_ich8lan(struct e1000_hw *hw);
+extern void e1000e_disable_gig_wol_ich8lan(struct e1000_hw *hw);

 extern s32 e1000e_check_for_copper_link(struct e1000_hw *hw);
 extern s32 e1000e_check_for_fiber_link(struct e1000_hw *hw);
@@ -443,6 +445,9 @@ extern s32 e1000e_get_phy_info_m88(struct e1000_hw *hw);
 extern s32 e1000e_read_phy_reg_m88(struct e1000_hw *hw, u32 offset, u16 *data);
 extern s32 e1000e_write_phy_reg_m88(struct e1000_hw *hw, u32 offset, u16 data);
 extern enum e1000_phy_type e1000e_get_phy_type_from_id(u32 phy_id);
+extern s32 e1000e_determine_phy_address(struct e1000_hw *hw);
+extern s32 e1000e_write_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 data);
+extern s32 e1000e_read_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 *data);
 extern void e1000e_phy_force_speed_duplex_setup(struct e1000_hw *hw, u16 *phy_ctrl);
 extern s32 e1000e_write_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 data);
 extern s32 e1000e_read_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 *data);
diff --git a/drivers/net/e1000e/ethtool.c b/drivers/net/e1000e/ethtool.c
index ce045acce63e..a14561f40db0 100644
--- a/drivers/net/e1000e/ethtool.c
+++ b/drivers/net/e1000e/ethtool.c
@@ -494,8 +494,12 @@ static int e1000_get_eeprom(struct net_device *netdev,
494 for (i = 0; i < last_word - first_word + 1; i++) { 494 for (i = 0; i < last_word - first_word + 1; i++) {
495 ret_val = e1000_read_nvm(hw, first_word + i, 1, 495 ret_val = e1000_read_nvm(hw, first_word + i, 1,
496 &eeprom_buff[i]); 496 &eeprom_buff[i]);
497 if (ret_val) 497 if (ret_val) {
498 /* a read error occurred, throw away the
499 * result */
 500 memset(eeprom_buff, 0xff, sizeof(u16) * (last_word - first_word + 1));
498 break; 501 break;
502 }
499 } 503 }
500 } 504 }
501 505
@@ -803,8 +807,7 @@ static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data)
803 /* restore previous status */ 807 /* restore previous status */
804 ew32(STATUS, before); 808 ew32(STATUS, before);
805 809
806 if ((mac->type != e1000_ich8lan) && 810 if (!(adapter->flags & FLAG_IS_ICH)) {
807 (mac->type != e1000_ich9lan)) {
808 REG_PATTERN_TEST(E1000_FCAL, 0xFFFFFFFF, 0xFFFFFFFF); 811 REG_PATTERN_TEST(E1000_FCAL, 0xFFFFFFFF, 0xFFFFFFFF);
809 REG_PATTERN_TEST(E1000_FCAH, 0x0000FFFF, 0xFFFFFFFF); 812 REG_PATTERN_TEST(E1000_FCAH, 0x0000FFFF, 0xFFFFFFFF);
810 REG_PATTERN_TEST(E1000_FCT, 0x0000FFFF, 0xFFFFFFFF); 813 REG_PATTERN_TEST(E1000_FCT, 0x0000FFFF, 0xFFFFFFFF);
@@ -824,15 +827,13 @@ static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data)
824 827
825 REG_SET_AND_CHECK(E1000_RCTL, 0xFFFFFFFF, 0x00000000); 828 REG_SET_AND_CHECK(E1000_RCTL, 0xFFFFFFFF, 0x00000000);
826 829
827 before = (((mac->type == e1000_ich8lan) || 830 before = ((adapter->flags & FLAG_IS_ICH) ? 0x06C3B33E : 0x06DFB3FE);
828 (mac->type == e1000_ich9lan)) ? 0x06C3B33E : 0x06DFB3FE);
829 REG_SET_AND_CHECK(E1000_RCTL, before, 0x003FFFFB); 831 REG_SET_AND_CHECK(E1000_RCTL, before, 0x003FFFFB);
830 REG_SET_AND_CHECK(E1000_TCTL, 0xFFFFFFFF, 0x00000000); 832 REG_SET_AND_CHECK(E1000_TCTL, 0xFFFFFFFF, 0x00000000);
831 833
832 REG_SET_AND_CHECK(E1000_RCTL, before, 0xFFFFFFFF); 834 REG_SET_AND_CHECK(E1000_RCTL, before, 0xFFFFFFFF);
833 REG_PATTERN_TEST(E1000_RDBAL, 0xFFFFFFF0, 0xFFFFFFFF); 835 REG_PATTERN_TEST(E1000_RDBAL, 0xFFFFFFF0, 0xFFFFFFFF);
834 if ((mac->type != e1000_ich8lan) && 836 if (!(adapter->flags & FLAG_IS_ICH))
835 (mac->type != e1000_ich9lan))
836 REG_PATTERN_TEST(E1000_TXCW, 0xC000FFFF, 0x0000FFFF); 837 REG_PATTERN_TEST(E1000_TXCW, 0xC000FFFF, 0x0000FFFF);
837 REG_PATTERN_TEST(E1000_TDBAL, 0xFFFFFFF0, 0xFFFFFFFF); 838 REG_PATTERN_TEST(E1000_TDBAL, 0xFFFFFFF0, 0xFFFFFFFF);
838 REG_PATTERN_TEST(E1000_TIDV, 0x0000FFFF, 0x0000FFFF); 839 REG_PATTERN_TEST(E1000_TIDV, 0x0000FFFF, 0x0000FFFF);
@@ -911,9 +912,7 @@ static int e1000_intr_test(struct e1000_adapter *adapter, u64 *data)
911 912
912 /* Test each interrupt */ 913 /* Test each interrupt */
913 for (i = 0; i < 10; i++) { 914 for (i = 0; i < 10; i++) {
914 915 if ((adapter->flags & FLAG_IS_ICH) && (i == 8))
915 if (((adapter->hw.mac.type == e1000_ich8lan) ||
916 (adapter->hw.mac.type == e1000_ich9lan)) && i == 8)
917 continue; 916 continue;
918 917
919 /* Interrupt to test */ 918 /* Interrupt to test */
@@ -1184,6 +1183,7 @@ static int e1000_integrated_phy_loopback(struct e1000_adapter *adapter)
1184 struct e1000_hw *hw = &adapter->hw; 1183 struct e1000_hw *hw = &adapter->hw;
1185 u32 ctrl_reg = 0; 1184 u32 ctrl_reg = 0;
1186 u32 stat_reg = 0; 1185 u32 stat_reg = 0;
1186 u16 phy_reg = 0;
1187 1187
1188 hw->mac.autoneg = 0; 1188 hw->mac.autoneg = 0;
1189 1189
@@ -1211,6 +1211,28 @@ static int e1000_integrated_phy_loopback(struct e1000_adapter *adapter)
1211 E1000_CTRL_SPD_100 |/* Force Speed to 100 */ 1211 E1000_CTRL_SPD_100 |/* Force Speed to 100 */
1212 E1000_CTRL_FD); /* Force Duplex to FULL */ 1212 E1000_CTRL_FD); /* Force Duplex to FULL */
1213 break; 1213 break;
1214 case e1000_phy_bm:
1215 /* Set Default MAC Interface speed to 1GB */
1216 e1e_rphy(hw, PHY_REG(2, 21), &phy_reg);
1217 phy_reg &= ~0x0007;
1218 phy_reg |= 0x006;
1219 e1e_wphy(hw, PHY_REG(2, 21), phy_reg);
1220 /* Assert SW reset for above settings to take effect */
1221 e1000e_commit_phy(hw);
1222 mdelay(1);
1223 /* Force Full Duplex */
1224 e1e_rphy(hw, PHY_REG(769, 16), &phy_reg);
1225 e1e_wphy(hw, PHY_REG(769, 16), phy_reg | 0x000C);
1226 /* Set Link Up (in force link) */
1227 e1e_rphy(hw, PHY_REG(776, 16), &phy_reg);
1228 e1e_wphy(hw, PHY_REG(776, 16), phy_reg | 0x0040);
1229 /* Force Link */
1230 e1e_rphy(hw, PHY_REG(769, 16), &phy_reg);
1231 e1e_wphy(hw, PHY_REG(769, 16), phy_reg | 0x0040);
1232 /* Set Early Link Enable */
1233 e1e_rphy(hw, PHY_REG(769, 20), &phy_reg);
1234 e1e_wphy(hw, PHY_REG(769, 20), phy_reg | 0x0400);
1235 /* fall through */
1214 default: 1236 default:
1215 /* force 1000, set loopback */ 1237 /* force 1000, set loopback */
1216 e1e_wphy(hw, PHY_CONTROL, 0x4140); 1238 e1e_wphy(hw, PHY_CONTROL, 0x4140);
@@ -1224,8 +1246,7 @@ static int e1000_integrated_phy_loopback(struct e1000_adapter *adapter)
1224 E1000_CTRL_SPD_1000 |/* Force Speed to 1000 */ 1246 E1000_CTRL_SPD_1000 |/* Force Speed to 1000 */
1225 E1000_CTRL_FD); /* Force Duplex to FULL */ 1247 E1000_CTRL_FD); /* Force Duplex to FULL */
1226 1248
1227 if ((adapter->hw.mac.type == e1000_ich8lan) || 1249 if (adapter->flags & FLAG_IS_ICH)
1228 (adapter->hw.mac.type == e1000_ich9lan))
1229 ctrl_reg |= E1000_CTRL_SLU; /* Set Link Up */ 1250 ctrl_reg |= E1000_CTRL_SLU; /* Set Link Up */
1230 } 1251 }
1231 1252
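
The recurring change in these ethtool hunks replaces per-type comparisons against e1000_ich8lan/e1000_ich9lan with a single capability flag. A minimal sketch of the idiom, with the flag value taken from the define added in e1000.h above:

#include <stdio.h>

#define FLAG_IS_ICH	(1 << 9)	/* from e1000.h above */

struct adapter {
	unsigned int flags;
};

int main(void)
{
	struct adapter a = { .flags = FLAG_IS_ICH };

	/* one test now covers ICH8, ICH9 and any future ICH variant */
	if (a.flags & FLAG_IS_ICH)
		printf("skip registers that ICH parts do not implement\n");
	return 0;
}

Since e1000_ich8_info and e1000_ich9_info both set the flag later in this patch, adding another ICH variant no longer requires touching every comparison site.
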
diff --git a/drivers/net/e1000e/hw.h b/drivers/net/e1000e/hw.h
index a930e6d9cf02..74f263acb172 100644
--- a/drivers/net/e1000e/hw.h
+++ b/drivers/net/e1000e/hw.h
@@ -216,6 +216,21 @@ enum e1e_registers {
216#define IGP01E1000_PHY_LINK_HEALTH 0x13 /* PHY Link Health */ 216#define IGP01E1000_PHY_LINK_HEALTH 0x13 /* PHY Link Health */
217#define IGP02E1000_PHY_POWER_MGMT 0x19 /* Power Management */ 217#define IGP02E1000_PHY_POWER_MGMT 0x19 /* Power Management */
218#define IGP01E1000_PHY_PAGE_SELECT 0x1F /* Page Select */ 218#define IGP01E1000_PHY_PAGE_SELECT 0x1F /* Page Select */
219#define BM_PHY_PAGE_SELECT 22 /* Page Select for BM */
220#define IGP_PAGE_SHIFT 5
221#define PHY_REG_MASK 0x1F
222
223#define BM_WUC_PAGE 800
224#define BM_WUC_ADDRESS_OPCODE 0x11
225#define BM_WUC_DATA_OPCODE 0x12
226#define BM_WUC_ENABLE_PAGE 769
227#define BM_WUC_ENABLE_REG 17
228#define BM_WUC_ENABLE_BIT (1 << 2)
229#define BM_WUC_HOST_WU_BIT (1 << 4)
230
231#define BM_WUC PHY_REG(BM_WUC_PAGE, 1)
232#define BM_WUFC PHY_REG(BM_WUC_PAGE, 2)
233#define BM_WUS PHY_REG(BM_WUC_PAGE, 3)
219 234
220#define IGP01E1000_PHY_PCS_INIT_REG 0x00B4 235#define IGP01E1000_PHY_PCS_INIT_REG 0x00B4
221#define IGP01E1000_PHY_POLARITY_MASK 0x0078 236#define IGP01E1000_PHY_POLARITY_MASK 0x0078
@@ -331,10 +346,16 @@ enum e1e_registers {
331#define E1000_DEV_ID_ICH8_IFE_G 0x10C5 346#define E1000_DEV_ID_ICH8_IFE_G 0x10C5
332#define E1000_DEV_ID_ICH8_IGP_M 0x104D 347#define E1000_DEV_ID_ICH8_IGP_M 0x104D
333#define E1000_DEV_ID_ICH9_IGP_AMT 0x10BD 348#define E1000_DEV_ID_ICH9_IGP_AMT 0x10BD
349#define E1000_DEV_ID_ICH9_IGP_M_AMT 0x10F5
350#define E1000_DEV_ID_ICH9_IGP_M 0x10BF
351#define E1000_DEV_ID_ICH9_IGP_M_V 0x10CB
334#define E1000_DEV_ID_ICH9_IGP_C 0x294C 352#define E1000_DEV_ID_ICH9_IGP_C 0x294C
335#define E1000_DEV_ID_ICH9_IFE 0x10C0 353#define E1000_DEV_ID_ICH9_IFE 0x10C0
336#define E1000_DEV_ID_ICH9_IFE_GT 0x10C3 354#define E1000_DEV_ID_ICH9_IFE_GT 0x10C3
337#define E1000_DEV_ID_ICH9_IFE_G 0x10C2 355#define E1000_DEV_ID_ICH9_IFE_G 0x10C2
356#define E1000_DEV_ID_ICH10_R_BM_LM 0x10CC
357#define E1000_DEV_ID_ICH10_R_BM_LF 0x10CD
358#define E1000_DEV_ID_ICH10_R_BM_V 0x10CE
338 359
339#define E1000_FUNC_1 1 360#define E1000_FUNC_1 1
340 361
@@ -378,6 +399,7 @@ enum e1000_phy_type {
378 e1000_phy_gg82563, 399 e1000_phy_gg82563,
379 e1000_phy_igp_3, 400 e1000_phy_igp_3,
380 e1000_phy_ife, 401 e1000_phy_ife,
402 e1000_phy_bm,
381}; 403};
382 404
383enum e1000_bus_width { 405enum e1000_bus_width {
diff --git a/drivers/net/e1000e/ich8lan.c b/drivers/net/e1000e/ich8lan.c
index 768485dbb2c6..9e38452a738c 100644
--- a/drivers/net/e1000e/ich8lan.c
+++ b/drivers/net/e1000e/ich8lan.c
@@ -38,6 +38,12 @@
38 * 82566DM Gigabit Network Connection 38 * 82566DM Gigabit Network Connection
39 * 82566MC Gigabit Network Connection 39 * 82566MC Gigabit Network Connection
40 * 82566MM Gigabit Network Connection 40 * 82566MM Gigabit Network Connection
41 * 82567LM Gigabit Network Connection
42 * 82567LF Gigabit Network Connection
43 * 82567LM-2 Gigabit Network Connection
44 * 82567LF-2 Gigabit Network Connection
45 * 82567V-2 Gigabit Network Connection
46 * 82562GT-3 10/100 Network Connection
41 */ 47 */
42 48
43#include <linux/netdevice.h> 49#include <linux/netdevice.h>
@@ -198,6 +204,19 @@ static s32 e1000_init_phy_params_ich8lan(struct e1000_hw *hw)
198 phy->addr = 1; 204 phy->addr = 1;
199 phy->reset_delay_us = 100; 205 phy->reset_delay_us = 100;
200 206
207 /*
208 * We may need to do this twice - once for IGP and if that fails,
209 * we'll set BM func pointers and try again
210 */
211 ret_val = e1000e_determine_phy_address(hw);
212 if (ret_val) {
213 hw->phy.ops.write_phy_reg = e1000e_write_phy_reg_bm;
214 hw->phy.ops.read_phy_reg = e1000e_read_phy_reg_bm;
215 ret_val = e1000e_determine_phy_address(hw);
216 if (ret_val)
217 return ret_val;
218 }
219
201 phy->id = 0; 220 phy->id = 0;
202 while ((e1000_phy_unknown == e1000e_get_phy_type_from_id(phy->id)) && 221 while ((e1000_phy_unknown == e1000e_get_phy_type_from_id(phy->id)) &&
203 (i++ < 100)) { 222 (i++ < 100)) {
@@ -219,6 +238,13 @@ static s32 e1000_init_phy_params_ich8lan(struct e1000_hw *hw)
219 phy->type = e1000_phy_ife; 238 phy->type = e1000_phy_ife;
220 phy->autoneg_mask = E1000_ALL_NOT_GIG; 239 phy->autoneg_mask = E1000_ALL_NOT_GIG;
221 break; 240 break;
241 case BME1000_E_PHY_ID:
242 phy->type = e1000_phy_bm;
243 phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
244 hw->phy.ops.read_phy_reg = e1000e_read_phy_reg_bm;
245 hw->phy.ops.write_phy_reg = e1000e_write_phy_reg_bm;
246 hw->phy.ops.commit_phy = e1000e_phy_sw_reset;
247 break;
222 default: 248 default:
223 return -E1000_ERR_PHY; 249 return -E1000_ERR_PHY;
224 break; 250 break;
@@ -664,6 +690,7 @@ static s32 e1000_get_phy_info_ich8lan(struct e1000_hw *hw)
664 return e1000_get_phy_info_ife_ich8lan(hw); 690 return e1000_get_phy_info_ife_ich8lan(hw);
665 break; 691 break;
666 case e1000_phy_igp_3: 692 case e1000_phy_igp_3:
693 case e1000_phy_bm:
667 return e1000e_get_phy_info_igp(hw); 694 return e1000e_get_phy_info_igp(hw);
668 break; 695 break;
669 default: 696 default:
@@ -728,7 +755,7 @@ static s32 e1000_set_d0_lplu_state_ich8lan(struct e1000_hw *hw, bool active)
728 s32 ret_val = 0; 755 s32 ret_val = 0;
729 u16 data; 756 u16 data;
730 757
731 if (phy->type != e1000_phy_igp_3) 758 if (phy->type == e1000_phy_ife)
732 return ret_val; 759 return ret_val;
733 760
734 phy_ctrl = er32(PHY_CTRL); 761 phy_ctrl = er32(PHY_CTRL);
@@ -1918,8 +1945,35 @@ static s32 e1000_setup_copper_link_ich8lan(struct e1000_hw *hw)
1918 ret_val = e1000e_copper_link_setup_igp(hw); 1945 ret_val = e1000e_copper_link_setup_igp(hw);
1919 if (ret_val) 1946 if (ret_val)
1920 return ret_val; 1947 return ret_val;
1948 } else if (hw->phy.type == e1000_phy_bm) {
1949 ret_val = e1000e_copper_link_setup_m88(hw);
1950 if (ret_val)
1951 return ret_val;
1921 } 1952 }
1922 1953
1954 if (hw->phy.type == e1000_phy_ife) {
1955 ret_val = e1e_rphy(hw, IFE_PHY_MDIX_CONTROL, &reg_data);
1956 if (ret_val)
1957 return ret_val;
1958
1959 reg_data &= ~IFE_PMC_AUTO_MDIX;
1960
1961 switch (hw->phy.mdix) {
1962 case 1:
1963 reg_data &= ~IFE_PMC_FORCE_MDIX;
1964 break;
1965 case 2:
1966 reg_data |= IFE_PMC_FORCE_MDIX;
1967 break;
1968 case 0:
1969 default:
1970 reg_data |= IFE_PMC_AUTO_MDIX;
1971 break;
1972 }
1973 ret_val = e1e_wphy(hw, IFE_PHY_MDIX_CONTROL, reg_data);
1974 if (ret_val)
1975 return ret_val;
1976 }
1923 return e1000e_setup_copper_link(hw); 1977 return e1000e_setup_copper_link(hw);
1924} 1978}
1925 1979
@@ -2127,6 +2181,31 @@ void e1000e_gig_downshift_workaround_ich8lan(struct e1000_hw *hw)
2127} 2181}
2128 2182
2129/** 2183/**
2184 * e1000e_disable_gig_wol_ich8lan - disable gig during WoL
2185 * @hw: pointer to the HW structure
2186 *
2187 * During S0 to Sx transition, it is possible the link remains at gig
2188 * instead of negotiating to a lower speed. Before going to Sx, set
2189 * 'LPLU Enabled' and 'Gig Disable' to force link speed negotiation
2190 * to a lower speed.
2191 *
2192 * Should only be called for ICH9 devices.
2193 **/
2194void e1000e_disable_gig_wol_ich8lan(struct e1000_hw *hw)
2195{
2196 u32 phy_ctrl;
2197
2198 if (hw->mac.type == e1000_ich9lan) {
2199 phy_ctrl = er32(PHY_CTRL);
2200 phy_ctrl |= E1000_PHY_CTRL_D0A_LPLU |
2201 E1000_PHY_CTRL_GBE_DISABLE;
2202 ew32(PHY_CTRL, phy_ctrl);
2203 }
2204
2205 return;
2206}
2207
2208/**
2130 * e1000_cleanup_led_ich8lan - Restore the default LED operation 2209 * e1000_cleanup_led_ich8lan - Restore the default LED operation
2131 * @hw: pointer to the HW structure 2210 * @hw: pointer to the HW structure
2132 * 2211 *
@@ -2247,6 +2326,7 @@ static struct e1000_nvm_operations ich8_nvm_ops = {
2247struct e1000_info e1000_ich8_info = { 2326struct e1000_info e1000_ich8_info = {
2248 .mac = e1000_ich8lan, 2327 .mac = e1000_ich8lan,
2249 .flags = FLAG_HAS_WOL 2328 .flags = FLAG_HAS_WOL
2329 | FLAG_IS_ICH
2250 | FLAG_RX_CSUM_ENABLED 2330 | FLAG_RX_CSUM_ENABLED
2251 | FLAG_HAS_CTRLEXT_ON_LOAD 2331 | FLAG_HAS_CTRLEXT_ON_LOAD
2252 | FLAG_HAS_AMT 2332 | FLAG_HAS_AMT
@@ -2262,6 +2342,7 @@ struct e1000_info e1000_ich8_info = {
2262struct e1000_info e1000_ich9_info = { 2342struct e1000_info e1000_ich9_info = {
2263 .mac = e1000_ich9lan, 2343 .mac = e1000_ich9lan,
2264 .flags = FLAG_HAS_JUMBO_FRAMES 2344 .flags = FLAG_HAS_JUMBO_FRAMES
2345 | FLAG_IS_ICH
2265 | FLAG_HAS_WOL 2346 | FLAG_HAS_WOL
2266 | FLAG_RX_CSUM_ENABLED 2347 | FLAG_RX_CSUM_ENABLED
2267 | FLAG_HAS_CTRLEXT_ON_LOAD 2348 | FLAG_HAS_CTRLEXT_ON_LOAD
diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c
index 8991ab8911e2..8cbb40f3a506 100644
--- a/drivers/net/e1000e/netdev.c
+++ b/drivers/net/e1000e/netdev.c
@@ -43,10 +43,11 @@
43#include <linux/if_vlan.h> 43#include <linux/if_vlan.h>
44#include <linux/cpu.h> 44#include <linux/cpu.h>
45#include <linux/smp.h> 45#include <linux/smp.h>
46#include <linux/pm_qos_params.h>
46 47
47#include "e1000.h" 48#include "e1000.h"
48 49
49#define DRV_VERSION "0.2.1" 50#define DRV_VERSION "0.3.3.3-k2"
50char e1000e_driver_name[] = "e1000e"; 51char e1000e_driver_name[] = "e1000e";
51const char e1000e_driver_version[] = DRV_VERSION; 52const char e1000e_driver_version[] = DRV_VERSION;
52 53
@@ -341,6 +342,89 @@ no_buffers:
341} 342}
342 343
343/** 344/**
345 * e1000_alloc_jumbo_rx_buffers - Replace used jumbo receive buffers
346 * @adapter: address of board private structure
 347 * (the receive ring is taken from @adapter->rx_ring)
348 * @cleaned_count: number of buffers to allocate this pass
349 **/
350
351static void e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter,
352 int cleaned_count)
353{
354 struct net_device *netdev = adapter->netdev;
355 struct pci_dev *pdev = adapter->pdev;
356 struct e1000_rx_desc *rx_desc;
357 struct e1000_ring *rx_ring = adapter->rx_ring;
358 struct e1000_buffer *buffer_info;
359 struct sk_buff *skb;
360 unsigned int i;
361 unsigned int bufsz = 256 -
362 16 /* for skb_reserve */ -
363 NET_IP_ALIGN;
364
365 i = rx_ring->next_to_use;
366 buffer_info = &rx_ring->buffer_info[i];
367
368 while (cleaned_count--) {
369 skb = buffer_info->skb;
370 if (skb) {
371 skb_trim(skb, 0);
372 goto check_page;
373 }
374
375 skb = netdev_alloc_skb(netdev, bufsz);
376 if (unlikely(!skb)) {
377 /* Better luck next round */
378 adapter->alloc_rx_buff_failed++;
379 break;
380 }
381
 382 /* Make buffer alignment 2 beyond a 16 byte boundary;
383 * this will result in a 16 byte aligned IP header after
384 * the 14 byte MAC header is removed
385 */
386 skb_reserve(skb, NET_IP_ALIGN);
387
388 buffer_info->skb = skb;
389check_page:
390 /* allocate a new page if necessary */
391 if (!buffer_info->page) {
392 buffer_info->page = alloc_page(GFP_ATOMIC);
393 if (unlikely(!buffer_info->page)) {
394 adapter->alloc_rx_buff_failed++;
395 break;
396 }
397 }
398
399 if (!buffer_info->dma)
400 buffer_info->dma = pci_map_page(pdev,
401 buffer_info->page, 0,
402 PAGE_SIZE,
403 PCI_DMA_FROMDEVICE);
404
405 rx_desc = E1000_RX_DESC(*rx_ring, i);
406 rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
407
408 if (unlikely(++i == rx_ring->count))
409 i = 0;
410 buffer_info = &rx_ring->buffer_info[i];
411 }
412
413 if (likely(rx_ring->next_to_use != i)) {
414 rx_ring->next_to_use = i;
415 if (unlikely(i-- == 0))
416 i = (rx_ring->count - 1);
417
418 /* Force memory writes to complete before letting h/w
419 * know there are new descriptors to fetch. (Only
420 * applicable for weak-ordered memory model archs,
421 * such as IA-64). */
422 wmb();
423 writel(i, adapter->hw.hw_addr + rx_ring->tail);
424 }
425}
426
427/**
344 * e1000_clean_rx_irq - Send received data up the network stack; legacy 428 * e1000_clean_rx_irq - Send received data up the network stack; legacy
345 * @adapter: board private structure 429 * @adapter: board private structure
346 * 430 *
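
One subtlety in the jumbo allocator above: after a wmb() orders the descriptor writes, the tail register is written with the index of the last descriptor actually filled, one behind next_to_use, hence the post-decrement with wraparound. A user-space sketch of that arithmetic (the ring size is assumed, for illustration only):

#include <stdio.h>

#define RING_COUNT 256	/* assumed ring size */

int main(void)
{
	unsigned int next_to_use = 0;	/* allocator wrapped past slot 255 */
	unsigned int tail = next_to_use ? next_to_use - 1 : RING_COUNT - 1;

	/* the driver then does: wmb(); writel(tail, rx_ring->tail); */
	printf("tail=%u\n", tail);	/* prints tail=255 */
	return 0;
}
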
@@ -783,6 +867,186 @@ next_desc:
783} 867}
784 868
785/** 869/**
870 * e1000_consume_page - helper function
871 **/
872static void e1000_consume_page(struct e1000_buffer *bi, struct sk_buff *skb,
873 u16 length)
874{
875 bi->page = NULL;
876 skb->len += length;
877 skb->data_len += length;
878 skb->truesize += length;
879}
880
881/**
882 * e1000_clean_jumbo_rx_irq - Send received data up the network stack; legacy
883 * @adapter: board private structure
884 *
 885 * the return value indicates whether actual cleaning was done; there
 886 * is no guarantee that everything was cleaned
887 **/
888
889static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
890 int *work_done, int work_to_do)
891{
892 struct net_device *netdev = adapter->netdev;
893 struct pci_dev *pdev = adapter->pdev;
894 struct e1000_ring *rx_ring = adapter->rx_ring;
895 struct e1000_rx_desc *rx_desc, *next_rxd;
896 struct e1000_buffer *buffer_info, *next_buffer;
897 u32 length;
898 unsigned int i;
899 int cleaned_count = 0;
900 bool cleaned = false;
 901 unsigned int total_rx_bytes = 0, total_rx_packets = 0;
902
903 i = rx_ring->next_to_clean;
904 rx_desc = E1000_RX_DESC(*rx_ring, i);
905 buffer_info = &rx_ring->buffer_info[i];
906
907 while (rx_desc->status & E1000_RXD_STAT_DD) {
908 struct sk_buff *skb;
909 u8 status;
910
911 if (*work_done >= work_to_do)
912 break;
913 (*work_done)++;
914
915 status = rx_desc->status;
916 skb = buffer_info->skb;
917 buffer_info->skb = NULL;
918
919 ++i;
920 if (i == rx_ring->count)
921 i = 0;
922 next_rxd = E1000_RX_DESC(*rx_ring, i);
923 prefetch(next_rxd);
924
925 next_buffer = &rx_ring->buffer_info[i];
926
927 cleaned = true;
928 cleaned_count++;
929 pci_unmap_page(pdev, buffer_info->dma, PAGE_SIZE,
930 PCI_DMA_FROMDEVICE);
931 buffer_info->dma = 0;
932
933 length = le16_to_cpu(rx_desc->length);
934
 935 /* the errors field is only valid for DD + EOP descriptors */
936 if (unlikely((status & E1000_RXD_STAT_EOP) &&
937 (rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK))) {
938 /* recycle both page and skb */
939 buffer_info->skb = skb;
940 /* an error means any chain goes out the window
941 * too */
942 if (rx_ring->rx_skb_top)
943 dev_kfree_skb(rx_ring->rx_skb_top);
944 rx_ring->rx_skb_top = NULL;
945 goto next_desc;
946 }
947
948#define rxtop rx_ring->rx_skb_top
949 if (!(status & E1000_RXD_STAT_EOP)) {
950 /* this descriptor is only the beginning (or middle) */
951 if (!rxtop) {
952 /* this is the beginning of a chain */
953 rxtop = skb;
954 skb_fill_page_desc(rxtop, 0, buffer_info->page,
955 0, length);
956 } else {
957 /* this is the middle of a chain */
958 skb_fill_page_desc(rxtop,
959 skb_shinfo(rxtop)->nr_frags,
960 buffer_info->page, 0, length);
961 /* re-use the skb, only consumed the page */
962 buffer_info->skb = skb;
963 }
964 e1000_consume_page(buffer_info, rxtop, length);
965 goto next_desc;
966 } else {
967 if (rxtop) {
968 /* end of the chain */
969 skb_fill_page_desc(rxtop,
970 skb_shinfo(rxtop)->nr_frags,
971 buffer_info->page, 0, length);
972 /* re-use the current skb, we only consumed the
973 * page */
974 buffer_info->skb = skb;
975 skb = rxtop;
976 rxtop = NULL;
977 e1000_consume_page(buffer_info, skb, length);
978 } else {
 979 /* no chain, got EOP, this buf is the packet;
980 * copybreak to save the put_page/alloc_page */
981 if (length <= copybreak &&
982 skb_tailroom(skb) >= length) {
983 u8 *vaddr;
984 vaddr = kmap_atomic(buffer_info->page,
985 KM_SKB_DATA_SOFTIRQ);
986 memcpy(skb_tail_pointer(skb), vaddr,
987 length);
988 kunmap_atomic(vaddr,
989 KM_SKB_DATA_SOFTIRQ);
990 /* re-use the page, so don't erase
991 * buffer_info->page */
992 skb_put(skb, length);
993 } else {
994 skb_fill_page_desc(skb, 0,
995 buffer_info->page, 0,
996 length);
997 e1000_consume_page(buffer_info, skb,
998 length);
999 }
1000 }
1001 }
1002
1003 /* Receive Checksum Offload XXX recompute due to CRC strip? */
1004 e1000_rx_checksum(adapter,
1005 (u32)(status) |
1006 ((u32)(rx_desc->errors) << 24),
1007 le16_to_cpu(rx_desc->csum), skb);
1008
1009 /* probably a little skewed due to removing CRC */
1010 total_rx_bytes += skb->len;
1011 total_rx_packets++;
1012
1013 /* eth type trans needs skb->data to point to something */
1014 if (!pskb_may_pull(skb, ETH_HLEN)) {
1015 ndev_err(netdev, "pskb_may_pull failed.\n");
1016 dev_kfree_skb(skb);
1017 goto next_desc;
1018 }
1019
1020 e1000_receive_skb(adapter, netdev, skb, status,
1021 rx_desc->special);
1022
1023next_desc:
1024 rx_desc->status = 0;
1025
1026 /* return some buffers to hardware, one at a time is too slow */
1027 if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) {
1028 adapter->alloc_rx_buf(adapter, cleaned_count);
1029 cleaned_count = 0;
1030 }
1031
1032 /* use prefetched values */
1033 rx_desc = next_rxd;
1034 buffer_info = next_buffer;
1035 }
1036 rx_ring->next_to_clean = i;
1037
1038 cleaned_count = e1000_desc_unused(rx_ring);
1039 if (cleaned_count)
1040 adapter->alloc_rx_buf(adapter, cleaned_count);
1041
1042 adapter->total_rx_bytes += total_rx_bytes;
1043 adapter->total_rx_packets += total_rx_packets;
1044 adapter->net_stats.rx_bytes += total_rx_bytes;
1045 adapter->net_stats.rx_packets += total_rx_packets;
1046 return cleaned;
1047}
1048
1049/**
786 * e1000_clean_rx_ring - Free Rx Buffers per Queue 1050 * e1000_clean_rx_ring - Free Rx Buffers per Queue
787 * @adapter: board private structure 1051 * @adapter: board private structure
788 **/ 1052 **/
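
The copybreak test near the end of e1000_clean_jumbo_rx_irq() above trades a memcpy for a page-flip on short frames. A sketch of the decision; the 256-byte threshold is an assumption standing in for the driver's copybreak module parameter:

#include <stdio.h>

#define COPYBREAK 256	/* assumed default for the copybreak parameter */

static const char *rx_strategy(unsigned int length, unsigned int tailroom)
{
	if (length <= COPYBREAK && tailroom >= length)
		return "memcpy into skb head, page is reused";
	return "skb_fill_page_desc(), page is consumed";
}

int main(void)
{
	printf("64 bytes:   %s\n", rx_strategy(64, 256));
	printf("1500 bytes: %s\n", rx_strategy(1500, 256));
	return 0;
}
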
@@ -802,6 +1066,10 @@ static void e1000_clean_rx_ring(struct e1000_adapter *adapter)
802 pci_unmap_single(pdev, buffer_info->dma, 1066 pci_unmap_single(pdev, buffer_info->dma,
803 adapter->rx_buffer_len, 1067 adapter->rx_buffer_len,
804 PCI_DMA_FROMDEVICE); 1068 PCI_DMA_FROMDEVICE);
1069 else if (adapter->clean_rx == e1000_clean_jumbo_rx_irq)
1070 pci_unmap_page(pdev, buffer_info->dma,
1071 PAGE_SIZE,
1072 PCI_DMA_FROMDEVICE);
805 else if (adapter->clean_rx == e1000_clean_rx_irq_ps) 1073 else if (adapter->clean_rx == e1000_clean_rx_irq_ps)
806 pci_unmap_single(pdev, buffer_info->dma, 1074 pci_unmap_single(pdev, buffer_info->dma,
807 adapter->rx_ps_bsize0, 1075 adapter->rx_ps_bsize0,
@@ -809,6 +1077,11 @@ static void e1000_clean_rx_ring(struct e1000_adapter *adapter)
809 buffer_info->dma = 0; 1077 buffer_info->dma = 0;
810 } 1078 }
811 1079
1080 if (buffer_info->page) {
1081 put_page(buffer_info->page);
1082 buffer_info->page = NULL;
1083 }
1084
812 if (buffer_info->skb) { 1085 if (buffer_info->skb) {
813 dev_kfree_skb(buffer_info->skb); 1086 dev_kfree_skb(buffer_info->skb);
814 buffer_info->skb = NULL; 1087 buffer_info->skb = NULL;
@@ -1755,10 +2028,12 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter)
1755 * a lot of memory, since we allocate 3 pages at all times 2028 * a lot of memory, since we allocate 3 pages at all times
1756 * per packet. 2029 * per packet.
1757 */ 2030 */
1758 adapter->rx_ps_pages = 0;
1759 pages = PAGE_USE_COUNT(adapter->netdev->mtu); 2031 pages = PAGE_USE_COUNT(adapter->netdev->mtu);
1760 if ((pages <= 3) && (PAGE_SIZE <= 16384) && (rctl & E1000_RCTL_LPE)) 2032 if (!(adapter->flags & FLAG_IS_ICH) && (pages <= 3) &&
2033 (PAGE_SIZE <= 16384) && (rctl & E1000_RCTL_LPE))
1761 adapter->rx_ps_pages = pages; 2034 adapter->rx_ps_pages = pages;
2035 else
2036 adapter->rx_ps_pages = 0;
1762 2037
1763 if (adapter->rx_ps_pages) { 2038 if (adapter->rx_ps_pages) {
1764 /* Configure extra packet-split registers */ 2039 /* Configure extra packet-split registers */
@@ -1819,9 +2094,12 @@ static void e1000_configure_rx(struct e1000_adapter *adapter)
1819 sizeof(union e1000_rx_desc_packet_split); 2094 sizeof(union e1000_rx_desc_packet_split);
1820 adapter->clean_rx = e1000_clean_rx_irq_ps; 2095 adapter->clean_rx = e1000_clean_rx_irq_ps;
1821 adapter->alloc_rx_buf = e1000_alloc_rx_buffers_ps; 2096 adapter->alloc_rx_buf = e1000_alloc_rx_buffers_ps;
2097 } else if (adapter->netdev->mtu > ETH_FRAME_LEN + ETH_FCS_LEN) {
2098 rdlen = rx_ring->count * sizeof(struct e1000_rx_desc);
2099 adapter->clean_rx = e1000_clean_jumbo_rx_irq;
2100 adapter->alloc_rx_buf = e1000_alloc_jumbo_rx_buffers;
1822 } else { 2101 } else {
1823 rdlen = rx_ring->count * 2102 rdlen = rx_ring->count * sizeof(struct e1000_rx_desc);
1824 sizeof(struct e1000_rx_desc);
1825 adapter->clean_rx = e1000_clean_rx_irq; 2103 adapter->clean_rx = e1000_clean_rx_irq;
1826 adapter->alloc_rx_buf = e1000_alloc_rx_buffers; 2104 adapter->alloc_rx_buf = e1000_alloc_rx_buffers;
1827 } 2105 }
@@ -1885,8 +2163,21 @@ static void e1000_configure_rx(struct e1000_adapter *adapter)
1885 * units), e.g. using jumbo frames when setting to E1000_ERT_2048 2163 * units), e.g. using jumbo frames when setting to E1000_ERT_2048
1886 */ 2164 */
1887 if ((adapter->flags & FLAG_HAS_ERT) && 2165 if ((adapter->flags & FLAG_HAS_ERT) &&
1888 (adapter->netdev->mtu > ETH_DATA_LEN)) 2166 (adapter->netdev->mtu > ETH_DATA_LEN)) {
1889 ew32(ERT, E1000_ERT_2048); 2167 u32 rxdctl = er32(RXDCTL(0));
2168 ew32(RXDCTL(0), rxdctl | 0x3);
2169 ew32(ERT, E1000_ERT_2048 | (1 << 13));
2170 /*
2171 * With jumbo frames and early-receive enabled, excessive
2172 * C4->C2 latencies result in dropped transactions.
2173 */
2174 pm_qos_update_requirement(PM_QOS_CPU_DMA_LATENCY,
2175 e1000e_driver_name, 55);
2176 } else {
2177 pm_qos_update_requirement(PM_QOS_CPU_DMA_LATENCY,
2178 e1000e_driver_name,
2179 PM_QOS_DEFAULT_VALUE);
2180 }
1890 2181
1891 /* Enable Receives */ 2182 /* Enable Receives */
1892 ew32(RCTL, rctl); 2183 ew32(RCTL, rctl);
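
The three PM QoS calls this patch adds (module init, reconfigure, module exit) form one lifecycle. A kernel-side sketch, assuming the legacy pm_qos_params interface this kernel generation provides, with CPU-DMA-latency values in microseconds; it mirrors the add/update/remove pairing used by the patch rather than defining anything new:

#include <linux/pm_qos_params.h>

static void sketch_init(void)
{
	pm_qos_add_requirement(PM_QOS_CPU_DMA_LATENCY, "e1000e",
			       PM_QOS_DEFAULT_VALUE);
}

static void sketch_configure_rx(int jumbo_with_early_receive)
{
	if (jumbo_with_early_receive)
		/* cap CPU exit latency so early-receive DMA keeps up */
		pm_qos_update_requirement(PM_QOS_CPU_DMA_LATENCY,
					  "e1000e", 55);
	else
		pm_qos_update_requirement(PM_QOS_CPU_DMA_LATENCY,
					  "e1000e", PM_QOS_DEFAULT_VALUE);
}

static void sketch_exit(void)
{
	pm_qos_remove_requirement(PM_QOS_CPU_DMA_LATENCY, "e1000e");
}
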
@@ -2155,6 +2446,14 @@ void e1000e_reset(struct e1000_adapter *adapter)
2155 2446
2156 /* Allow time for pending master requests to run */ 2447 /* Allow time for pending master requests to run */
2157 mac->ops.reset_hw(hw); 2448 mac->ops.reset_hw(hw);
2449
2450 /*
2451 * For parts with AMT enabled, let the firmware know
2452 * that the network interface is in control
2453 */
2454 if ((adapter->flags & FLAG_HAS_AMT) && e1000e_check_mng_mode(hw))
2455 e1000_get_hw_control(adapter);
2456
2158 ew32(WUC, 0); 2457 ew32(WUC, 0);
2159 2458
2160 if (mac->ops.init_hw(hw)) 2459 if (mac->ops.init_hw(hw))
@@ -3469,6 +3768,8 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
3469 * means we reserve 2 more, this pushes us to allocate from the next 3768 * means we reserve 2 more, this pushes us to allocate from the next
3470 * larger slab size. 3769 * larger slab size.
3471 * i.e. RXBUFFER_2048 --> size-4096 slab 3770 * i.e. RXBUFFER_2048 --> size-4096 slab
3771 * However with the new *_jumbo_rx* routines, jumbo receives will use
3772 * fragmented skbs
3472 */ 3773 */
3473 3774
3474 if (max_frame <= 256) 3775 if (max_frame <= 256)
@@ -3626,6 +3927,9 @@ static int e1000_suspend(struct pci_dev *pdev, pm_message_t state)
3626 ew32(CTRL_EXT, ctrl_ext); 3927 ew32(CTRL_EXT, ctrl_ext);
3627 } 3928 }
3628 3929
3930 if (adapter->flags & FLAG_IS_ICH)
3931 e1000e_disable_gig_wol_ich8lan(&adapter->hw);
3932
3629 /* Allow time for pending master requests to run */ 3933 /* Allow time for pending master requests to run */
3630 e1000e_disable_pcie_master(&adapter->hw); 3934 e1000e_disable_pcie_master(&adapter->hw);
3631 3935
@@ -4292,6 +4596,13 @@ static struct pci_device_id e1000_pci_tbl[] = {
4292 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IFE_GT), board_ich9lan }, 4596 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IFE_GT), board_ich9lan },
4293 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_AMT), board_ich9lan }, 4597 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_AMT), board_ich9lan },
4294 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_C), board_ich9lan }, 4598 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_C), board_ich9lan },
4599 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_M), board_ich9lan },
4600 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_M_AMT), board_ich9lan },
4601 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_M_V), board_ich9lan },
4602
4603 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_R_BM_LM), board_ich9lan },
4604 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_R_BM_LF), board_ich9lan },
4605 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_R_BM_V), board_ich9lan },
4295 4606
4296 { } /* terminate list */ 4607 { } /* terminate list */
4297}; 4608};
@@ -4326,7 +4637,9 @@ static int __init e1000_init_module(void)
4326 printk(KERN_INFO "%s: Copyright (c) 1999-2008 Intel Corporation.\n", 4637 printk(KERN_INFO "%s: Copyright (c) 1999-2008 Intel Corporation.\n",
4327 e1000e_driver_name); 4638 e1000e_driver_name);
4328 ret = pci_register_driver(&e1000_driver); 4639 ret = pci_register_driver(&e1000_driver);
4329 4640 pm_qos_add_requirement(PM_QOS_CPU_DMA_LATENCY, e1000e_driver_name,
4641 PM_QOS_DEFAULT_VALUE);
4642
4330 return ret; 4643 return ret;
4331} 4644}
4332module_init(e1000_init_module); 4645module_init(e1000_init_module);
@@ -4340,6 +4653,7 @@ module_init(e1000_init_module);
4340static void __exit e1000_exit_module(void) 4653static void __exit e1000_exit_module(void)
4341{ 4654{
4342 pci_unregister_driver(&e1000_driver); 4655 pci_unregister_driver(&e1000_driver);
4656 pm_qos_remove_requirement(PM_QOS_CPU_DMA_LATENCY, e1000e_driver_name);
4343} 4657}
4344module_exit(e1000_exit_module); 4658module_exit(e1000_exit_module);
4345 4659
diff --git a/drivers/net/e1000e/phy.c b/drivers/net/e1000e/phy.c
index e102332a6bee..b133dcf0e950 100644
--- a/drivers/net/e1000e/phy.c
+++ b/drivers/net/e1000e/phy.c
@@ -34,6 +34,9 @@ static s32 e1000_get_phy_cfg_done(struct e1000_hw *hw);
34static s32 e1000_phy_force_speed_duplex(struct e1000_hw *hw); 34static s32 e1000_phy_force_speed_duplex(struct e1000_hw *hw);
35static s32 e1000_set_d0_lplu_state(struct e1000_hw *hw, bool active); 35static s32 e1000_set_d0_lplu_state(struct e1000_hw *hw, bool active);
36static s32 e1000_wait_autoneg(struct e1000_hw *hw); 36static s32 e1000_wait_autoneg(struct e1000_hw *hw);
37static u32 e1000_get_phy_addr_for_bm_page(u32 page, u32 reg);
38static s32 e1000_access_phy_wakeup_reg_bm(struct e1000_hw *hw, u32 offset,
39 u16 *data, bool read);
37 40
38/* Cable length tables */ 41/* Cable length tables */
39static const u16 e1000_m88_cable_length_table[] = 42static const u16 e1000_m88_cable_length_table[] =
@@ -465,6 +468,10 @@ s32 e1000e_copper_link_setup_m88(struct e1000_hw *hw)
465 if (phy->disable_polarity_correction == 1) 468 if (phy->disable_polarity_correction == 1)
466 phy_data |= M88E1000_PSCR_POLARITY_REVERSAL; 469 phy_data |= M88E1000_PSCR_POLARITY_REVERSAL;
467 470
471 /* Enable downshift on BM (disabled by default) */
472 if (phy->type == e1000_phy_bm)
473 phy_data |= BME1000_PSCR_ENABLE_DOWNSHIFT;
474
468 ret_val = e1e_wphy(hw, M88E1000_PHY_SPEC_CTRL, phy_data); 475 ret_val = e1e_wphy(hw, M88E1000_PHY_SPEC_CTRL, phy_data);
469 if (ret_val) 476 if (ret_val)
470 return ret_val; 477 return ret_val;
@@ -1776,6 +1783,10 @@ enum e1000_phy_type e1000e_get_phy_type_from_id(u32 phy_id)
1776 case IFE_C_E_PHY_ID: 1783 case IFE_C_E_PHY_ID:
1777 phy_type = e1000_phy_ife; 1784 phy_type = e1000_phy_ife;
1778 break; 1785 break;
1786 case BME1000_E_PHY_ID:
1787 case BME1000_E_PHY_ID_R2:
1788 phy_type = e1000_phy_bm;
1789 break;
1779 default: 1790 default:
1780 phy_type = e1000_phy_unknown; 1791 phy_type = e1000_phy_unknown;
1781 break; 1792 break;
@@ -1784,6 +1795,273 @@ enum e1000_phy_type e1000e_get_phy_type_from_id(u32 phy_id)
1784} 1795}
1785 1796
1786/** 1797/**
1798 * e1000e_determine_phy_address - Determines PHY address.
1799 * @hw: pointer to the HW structure
1800 *
1801 * This uses a trial and error method to loop through possible PHY
1802 * addresses. It tests each by reading the PHY ID registers and
1803 * checking for a match.
1804 **/
1805s32 e1000e_determine_phy_address(struct e1000_hw *hw)
1806{
1807 s32 ret_val = -E1000_ERR_PHY_TYPE;
1808 u32 phy_addr= 0;
1809 u32 i = 0;
1810 enum e1000_phy_type phy_type = e1000_phy_unknown;
1811
1812 do {
1813 for (phy_addr = 0; phy_addr < 4; phy_addr++) {
1814 hw->phy.addr = phy_addr;
1815 e1000e_get_phy_id(hw);
1816 phy_type = e1000e_get_phy_type_from_id(hw->phy.id);
1817
1818 /*
1819 * If phy_type is valid, break - we found our
1820 * PHY address
1821 */
1822 if (phy_type != e1000_phy_unknown) {
1823 ret_val = 0;
1824 break;
1825 }
1826 }
1827 i++;
1828 } while ((ret_val != 0) && (i < 100));
1829
1830 return ret_val;
1831}
1832
1833/**
1834 * e1000_get_phy_addr_for_bm_page - Retrieve PHY page address
1835 * @page: page to access
1836 *
1837 * Returns the phy address for the page requested.
1838 **/
1839static u32 e1000_get_phy_addr_for_bm_page(u32 page, u32 reg)
1840{
1841 u32 phy_addr = 2;
1842
1843 if ((page >= 768) || (page == 0 && reg == 25) || (reg == 31))
1844 phy_addr = 1;
1845
1846 return phy_addr;
1847}
1848
1849/**
1850 * e1000e_write_phy_reg_bm - Write BM PHY register
1851 * @hw: pointer to the HW structure
1852 * @offset: register offset to write to
1853 * @data: data to write at register offset
1854 *
1855 * Acquires semaphore, if necessary, then writes the data to PHY register
 1856 * at the offset. Releases any acquired semaphores before exiting.
1857 **/
1858s32 e1000e_write_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 data)
1859{
1860 s32 ret_val;
1861 u32 page_select = 0;
1862 u32 page = offset >> IGP_PAGE_SHIFT;
1863 u32 page_shift = 0;
1864
1865 /* Page 800 works differently than the rest so it has its own func */
1866 if (page == BM_WUC_PAGE) {
1867 ret_val = e1000_access_phy_wakeup_reg_bm(hw, offset, &data,
1868 false);
1869 goto out;
1870 }
1871
1872 ret_val = hw->phy.ops.acquire_phy(hw);
1873 if (ret_val)
1874 goto out;
1875
1876 hw->phy.addr = e1000_get_phy_addr_for_bm_page(page, offset);
1877
1878 if (offset > MAX_PHY_MULTI_PAGE_REG) {
1879 /*
1880 * Page select is register 31 for phy address 1 and 22 for
1881 * phy address 2 and 3. Page select is shifted only for
1882 * phy address 1.
1883 */
1884 if (hw->phy.addr == 1) {
1885 page_shift = IGP_PAGE_SHIFT;
1886 page_select = IGP01E1000_PHY_PAGE_SELECT;
1887 } else {
1888 page_shift = 0;
1889 page_select = BM_PHY_PAGE_SELECT;
1890 }
1891
1892 /* Page is shifted left, PHY expects (page x 32) */
1893 ret_val = e1000e_write_phy_reg_mdic(hw, page_select,
1894 (page << page_shift));
1895 if (ret_val) {
1896 hw->phy.ops.release_phy(hw);
1897 goto out;
1898 }
1899 }
1900
1901 ret_val = e1000e_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset,
1902 data);
1903
1904 hw->phy.ops.release_phy(hw);
1905
1906out:
1907 return ret_val;
1908}
1909
1910/**
1911 * e1000e_read_phy_reg_bm - Read BM PHY register
1912 * @hw: pointer to the HW structure
1913 * @offset: register offset to be read
1914 * @data: pointer to the read data
1915 *
1916 * Acquires semaphore, if necessary, then reads the PHY register at offset
 1917 * and stores the retrieved information in data. Releases any acquired
 1918 * semaphores before exiting.
1919 **/
1920s32 e1000e_read_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 *data)
1921{
1922 s32 ret_val;
1923 u32 page_select = 0;
1924 u32 page = offset >> IGP_PAGE_SHIFT;
1925 u32 page_shift = 0;
1926
1927 /* Page 800 works differently than the rest so it has its own func */
1928 if (page == BM_WUC_PAGE) {
1929 ret_val = e1000_access_phy_wakeup_reg_bm(hw, offset, data,
1930 true);
1931 goto out;
1932 }
1933
1934 ret_val = hw->phy.ops.acquire_phy(hw);
1935 if (ret_val)
1936 goto out;
1937
1938 hw->phy.addr = e1000_get_phy_addr_for_bm_page(page, offset);
1939
1940 if (offset > MAX_PHY_MULTI_PAGE_REG) {
1941 /*
1942 * Page select is register 31 for phy address 1 and 22 for
1943 * phy address 2 and 3. Page select is shifted only for
1944 * phy address 1.
1945 */
1946 if (hw->phy.addr == 1) {
1947 page_shift = IGP_PAGE_SHIFT;
1948 page_select = IGP01E1000_PHY_PAGE_SELECT;
1949 } else {
1950 page_shift = 0;
1951 page_select = BM_PHY_PAGE_SELECT;
1952 }
1953
1954 /* Page is shifted left, PHY expects (page x 32) */
1955 ret_val = e1000e_write_phy_reg_mdic(hw, page_select,
1956 (page << page_shift));
1957 if (ret_val) {
1958 hw->phy.ops.release_phy(hw);
1959 goto out;
1960 }
1961 }
1962
1963 ret_val = e1000e_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset,
1964 data);
1965 hw->phy.ops.release_phy(hw);
1966
1967out:
1968 return ret_val;
1969}
1970
1971/**
 1972 * e1000_access_phy_wakeup_reg_bm - Read or write BM PHY wakeup register
1973 * @hw: pointer to the HW structure
1974 * @offset: register offset to be read or written
1975 * @data: pointer to the data to read or write
1976 * @read: determines if operation is read or write
1977 *
 1978 * Acquires semaphore, if necessary, then reads or writes the PHY register
 1979 * at offset, storing any retrieved information in data. Releases any
 1980 * acquired semaphores before exiting. Note that the procedure to access
 1981 * the wakeup registers is different. It works as follows:
1982 * 1) Set page 769, register 17, bit 2 = 1
1983 * 2) Set page to 800 for host (801 if we were manageability)
1984 * 3) Write the address using the address opcode (0x11)
1985 * 4) Read or write the data using the data opcode (0x12)
1986 * 5) Restore 769_17.2 to its original value
1987 **/
1988static s32 e1000_access_phy_wakeup_reg_bm(struct e1000_hw *hw, u32 offset,
1989 u16 *data, bool read)
1990{
1991 s32 ret_val;
1992 u16 reg = ((u16)offset) & PHY_REG_MASK;
1993 u16 phy_reg = 0;
1994 u8 phy_acquired = 1;
1995
1996
1997 ret_val = hw->phy.ops.acquire_phy(hw);
1998 if (ret_val) {
1999 phy_acquired = 0;
2000 goto out;
2001 }
2002
2003 /* All operations in this function are phy address 1 */
2004 hw->phy.addr = 1;
2005
2006 /* Set page 769 */
2007 e1000e_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT,
2008 (BM_WUC_ENABLE_PAGE << IGP_PAGE_SHIFT));
2009
2010 ret_val = e1000e_read_phy_reg_mdic(hw, BM_WUC_ENABLE_REG, &phy_reg);
2011 if (ret_val)
2012 goto out;
2013
2014 /* First clear bit 4 to avoid a power state change */
2015 phy_reg &= ~(BM_WUC_HOST_WU_BIT);
2016 ret_val = e1000e_write_phy_reg_mdic(hw, BM_WUC_ENABLE_REG, phy_reg);
2017 if (ret_val)
2018 goto out;
2019
2020 /* Write bit 2 = 1, and clear bit 4 to 769_17 */
2021 ret_val = e1000e_write_phy_reg_mdic(hw, BM_WUC_ENABLE_REG,
2022 phy_reg | BM_WUC_ENABLE_BIT);
2023 if (ret_val)
2024 goto out;
2025
2026 /* Select page 800 */
2027 ret_val = e1000e_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT,
2028 (BM_WUC_PAGE << IGP_PAGE_SHIFT));
2029
2030 /* Write the page 800 offset value using opcode 0x11 */
2031 ret_val = e1000e_write_phy_reg_mdic(hw, BM_WUC_ADDRESS_OPCODE, reg);
2032 if (ret_val)
2033 goto out;
2034
2035 if (read) {
2036 /* Read the page 800 value using opcode 0x12 */
2037 ret_val = e1000e_read_phy_reg_mdic(hw, BM_WUC_DATA_OPCODE,
2038 data);
2039 } else {
 2040 /* Write the page 800 value using opcode 0x12 */
2041 ret_val = e1000e_write_phy_reg_mdic(hw, BM_WUC_DATA_OPCODE,
2042 *data);
2043 }
2044
2045 if (ret_val)
2046 goto out;
2047
2048 /*
2049 * Restore 769_17.2 to its original value
2050 * Set page 769
2051 */
2052 e1000e_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT,
2053 (BM_WUC_ENABLE_PAGE << IGP_PAGE_SHIFT));
2054
2055 /* Clear 769_17.2 */
2056 ret_val = e1000e_write_phy_reg_mdic(hw, BM_WUC_ENABLE_REG, phy_reg);
2057
2058out:
2059 if (phy_acquired == 1)
2060 hw->phy.ops.release_phy(hw);
2061 return ret_val;
2062}
2063
2064/**
1787 * e1000e_commit_phy - Soft PHY reset 2065 * e1000e_commit_phy - Soft PHY reset
1788 * @hw: pointer to the HW structure 2066 * @hw: pointer to the HW structure
1789 * 2067 *
diff --git a/drivers/net/eexpress.c b/drivers/net/eexpress.c
index 2eb82aba4a8b..795c594a4b7c 100644
--- a/drivers/net/eexpress.c
+++ b/drivers/net/eexpress.c
@@ -202,7 +202,7 @@ static unsigned short start_code[] = {
202 0x0000,Cmd_MCast, 202 0x0000,Cmd_MCast,
203 0x0076, /* link to next command */ 203 0x0076, /* link to next command */
204#define CONF_NR_MULTICAST 0x44 204#define CONF_NR_MULTICAST 0x44
205 0x0000, /* number of multicast addresses */ 205 0x0000, /* number of bytes in multicast address(es) */
206#define CONF_MULTICAST 0x46 206#define CONF_MULTICAST 0x46
207 0x0000, 0x0000, 0x0000, /* some addresses */ 207 0x0000, 0x0000, 0x0000, /* some addresses */
208 0x0000, 0x0000, 0x0000, 208 0x0000, 0x0000, 0x0000,
@@ -1569,7 +1569,7 @@ static void eexp_hw_init586(struct net_device *dev)
1569 1569
1570static void eexp_setup_filter(struct net_device *dev) 1570static void eexp_setup_filter(struct net_device *dev)
1571{ 1571{
1572 struct dev_mc_list *dmi = dev->mc_list; 1572 struct dev_mc_list *dmi;
1573 unsigned short ioaddr = dev->base_addr; 1573 unsigned short ioaddr = dev->base_addr;
1574 int count = dev->mc_count; 1574 int count = dev->mc_count;
1575 int i; 1575 int i;
@@ -1580,9 +1580,9 @@ static void eexp_setup_filter(struct net_device *dev)
1580 } 1580 }
1581 1581
1582 outw(CONF_NR_MULTICAST & ~31, ioaddr+SM_PTR); 1582 outw(CONF_NR_MULTICAST & ~31, ioaddr+SM_PTR);
1583 outw(count, ioaddr+SHADOW(CONF_NR_MULTICAST)); 1583 outw(6*count, ioaddr+SHADOW(CONF_NR_MULTICAST));
1584 for (i = 0; i < count; i++) { 1584 for (i = 0, dmi = dev->mc_list; i < count; i++, dmi = dmi->next) {
1585 unsigned short *data = (unsigned short *)dmi->dmi_addr; 1585 unsigned short *data;
1586 if (!dmi) { 1586 if (!dmi) {
1587 printk(KERN_INFO "%s: too few multicast addresses\n", dev->name); 1587 printk(KERN_INFO "%s: too few multicast addresses\n", dev->name);
1588 break; 1588 break;
@@ -1591,6 +1591,7 @@ static void eexp_setup_filter(struct net_device *dev)
1591 printk(KERN_INFO "%s: invalid multicast address length given.\n", dev->name); 1591 printk(KERN_INFO "%s: invalid multicast address length given.\n", dev->name);
1592 continue; 1592 continue;
1593 } 1593 }
1594 data = (unsigned short *)dmi->dmi_addr;
1594 outw((CONF_MULTICAST+(6*i)) & ~31, ioaddr+SM_PTR); 1595 outw((CONF_MULTICAST+(6*i)) & ~31, ioaddr+SM_PTR);
1595 outw(data[0], ioaddr+SHADOW(CONF_MULTICAST+(6*i))); 1596 outw(data[0], ioaddr+SHADOW(CONF_MULTICAST+(6*i)));
1596 outw((CONF_MULTICAST+(6*i)+2) & ~31, ioaddr+SM_PTR); 1597 outw((CONF_MULTICAST+(6*i)+2) & ~31, ioaddr+SM_PTR);
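
The corrected config word above is a byte count, not an address count: the i82586 multicast-setup block wants the total length of the address list, six bytes per Ethernet address. A one-liner to make the arithmetic concrete:

#include <stdio.h>

#define ETH_ALEN 6	/* bytes per Ethernet address */

int main(void)
{
	int count = 3;	/* three multicast addresses subscribed */

	printf("CONF_NR_MULTICAST gets %d\n", ETH_ALEN * count); /* 18 */
	return 0;
}
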
diff --git a/drivers/net/ehea/ehea.h b/drivers/net/ehea/ehea.h
index f5dacceab95b..fe872fbd671e 100644
--- a/drivers/net/ehea/ehea.h
+++ b/drivers/net/ehea/ehea.h
@@ -40,7 +40,7 @@
40#include <asm/io.h> 40#include <asm/io.h>
41 41
42#define DRV_NAME "ehea" 42#define DRV_NAME "ehea"
43#define DRV_VERSION "EHEA_0090" 43#define DRV_VERSION "EHEA_0091"
44 44
45/* eHEA capability flags */ 45/* eHEA capability flags */
46#define DLPAR_PORT_ADD_REM 1 46#define DLPAR_PORT_ADD_REM 1
@@ -118,6 +118,13 @@
118#define EHEA_MR_ACC_CTRL 0x00800000 118#define EHEA_MR_ACC_CTRL 0x00800000
119 119
120#define EHEA_BUSMAP_START 0x8000000000000000ULL 120#define EHEA_BUSMAP_START 0x8000000000000000ULL
121#define EHEA_INVAL_ADDR 0xFFFFFFFFFFFFFFFFULL
122#define EHEA_DIR_INDEX_SHIFT 13 /* 8k Entries in 64k block */
123#define EHEA_TOP_INDEX_SHIFT (EHEA_DIR_INDEX_SHIFT * 2)
124#define EHEA_MAP_ENTRIES (1 << EHEA_DIR_INDEX_SHIFT)
125#define EHEA_MAP_SIZE (0x10000) /* currently fixed map size */
126#define EHEA_INDEX_MASK (EHEA_MAP_ENTRIES - 1)
127
121 128
122#define EHEA_WATCH_DOG_TIMEOUT 10*HZ 129#define EHEA_WATCH_DOG_TIMEOUT 10*HZ
123 130
@@ -192,10 +199,20 @@ struct h_epas {
192 set to 0 if unused */ 199 set to 0 if unused */
193}; 200};
194 201
195struct ehea_busmap { 202/*
196 unsigned int entries; /* total number of entries */ 203 * Memory map data structures
197 unsigned int valid_sections; /* number of valid sections */ 204 */
198 u64 *vaddr; 205struct ehea_dir_bmap
206{
207 u64 ent[EHEA_MAP_ENTRIES];
208};
209struct ehea_top_bmap
210{
211 struct ehea_dir_bmap *dir[EHEA_MAP_ENTRIES];
212};
213struct ehea_bmap
214{
215 struct ehea_top_bmap *top[EHEA_MAP_ENTRIES];
199}; 216};
200 217
201struct ehea_qp; 218struct ehea_qp;
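
The three-level map declared above is indexed by splitting a memory-section index into 13-bit top/dir/idx fields. A standalone sketch of the decomposition that ehea_calc_index() in ehea_qmr.c performs, using the defines added in this header:

#include <stdio.h>

#define EHEA_DIR_INDEX_SHIFT	13
#define EHEA_TOP_INDEX_SHIFT	(EHEA_DIR_INDEX_SHIFT * 2)
#define EHEA_MAP_ENTRIES	(1 << EHEA_DIR_INDEX_SHIFT)
#define EHEA_INDEX_MASK		(EHEA_MAP_ENTRIES - 1)

int main(void)
{
	unsigned long index = 0x12345678UL;	/* arbitrary section index */
	int top = (index >> EHEA_TOP_INDEX_SHIFT) & EHEA_INDEX_MASK;
	int dir = (index >> EHEA_DIR_INDEX_SHIFT) & EHEA_INDEX_MASK;
	int idx = index & EHEA_INDEX_MASK;

	/* ehea_bmap->top[top]->dir[dir]->ent[idx] then holds the bus addr */
	printf("top=%#x dir=%#x idx=%#x\n", top, dir, idx);
	return 0;
}
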
diff --git a/drivers/net/ehea/ehea_main.c b/drivers/net/ehea/ehea_main.c
index f9bc21c74b59..d1b6d4e7495d 100644
--- a/drivers/net/ehea/ehea_main.c
+++ b/drivers/net/ehea/ehea_main.c
@@ -35,6 +35,7 @@
35#include <linux/if_ether.h> 35#include <linux/if_ether.h>
36#include <linux/notifier.h> 36#include <linux/notifier.h>
37#include <linux/reboot.h> 37#include <linux/reboot.h>
38#include <linux/memory.h>
38#include <asm/kexec.h> 39#include <asm/kexec.h>
39#include <linux/mutex.h> 40#include <linux/mutex.h>
40 41
@@ -3503,6 +3504,24 @@ void ehea_crash_handler(void)
3503 0, H_DEREG_BCMC); 3504 0, H_DEREG_BCMC);
3504} 3505}
3505 3506
3507static int ehea_mem_notifier(struct notifier_block *nb,
3508 unsigned long action, void *data)
3509{
3510 switch (action) {
3511 case MEM_OFFLINE:
3512 ehea_info("memory has been removed");
3513 ehea_rereg_mrs(NULL);
3514 break;
3515 default:
3516 break;
3517 }
3518 return NOTIFY_OK;
3519}
3520
3521static struct notifier_block ehea_mem_nb = {
3522 .notifier_call = ehea_mem_notifier,
3523};
3524
3506static int ehea_reboot_notifier(struct notifier_block *nb, 3525static int ehea_reboot_notifier(struct notifier_block *nb,
3507 unsigned long action, void *unused) 3526 unsigned long action, void *unused)
3508{ 3527{
@@ -3581,6 +3600,10 @@ int __init ehea_module_init(void)
3581 if (ret) 3600 if (ret)
3582 ehea_info("failed registering reboot notifier"); 3601 ehea_info("failed registering reboot notifier");
3583 3602
3603 ret = register_memory_notifier(&ehea_mem_nb);
3604 if (ret)
3605 ehea_info("failed registering memory remove notifier");
3606
3584 ret = crash_shutdown_register(&ehea_crash_handler); 3607 ret = crash_shutdown_register(&ehea_crash_handler);
3585 if (ret) 3608 if (ret)
3586 ehea_info("failed registering crash handler"); 3609 ehea_info("failed registering crash handler");
@@ -3604,6 +3627,7 @@ int __init ehea_module_init(void)
3604out3: 3627out3:
3605 ibmebus_unregister_driver(&ehea_driver); 3628 ibmebus_unregister_driver(&ehea_driver);
3606out2: 3629out2:
3630 unregister_memory_notifier(&ehea_mem_nb);
3607 unregister_reboot_notifier(&ehea_reboot_nb); 3631 unregister_reboot_notifier(&ehea_reboot_nb);
3608 crash_shutdown_unregister(&ehea_crash_handler); 3632 crash_shutdown_unregister(&ehea_crash_handler);
3609out: 3633out:
@@ -3621,6 +3645,7 @@ static void __exit ehea_module_exit(void)
3621 ret = crash_shutdown_unregister(&ehea_crash_handler); 3645 ret = crash_shutdown_unregister(&ehea_crash_handler);
3622 if (ret) 3646 if (ret)
3623 ehea_info("failed unregistering crash handler"); 3647 ehea_info("failed unregistering crash handler");
3648 unregister_memory_notifier(&ehea_mem_nb);
3624 kfree(ehea_fw_handles.arr); 3649 kfree(ehea_fw_handles.arr);
3625 kfree(ehea_bcmc_regs.arr); 3650 kfree(ehea_bcmc_regs.arr);
3626 ehea_destroy_busmap(); 3651 ehea_destroy_busmap();
diff --git a/drivers/net/ehea/ehea_qmr.c b/drivers/net/ehea/ehea_qmr.c
index d522e905f460..140f05baafd8 100644
--- a/drivers/net/ehea/ehea_qmr.c
+++ b/drivers/net/ehea/ehea_qmr.c
@@ -31,8 +31,8 @@
31#include "ehea_phyp.h" 31#include "ehea_phyp.h"
32#include "ehea_qmr.h" 32#include "ehea_qmr.h"
33 33
34struct ehea_bmap *ehea_bmap = NULL;
34 35
35struct ehea_busmap ehea_bmap = { 0, 0, NULL };
36 36
37 37
38static void *hw_qpageit_get_inc(struct hw_queue *queue) 38static void *hw_qpageit_get_inc(struct hw_queue *queue)
@@ -559,125 +559,253 @@ int ehea_destroy_qp(struct ehea_qp *qp)
559 return 0; 559 return 0;
560} 560}
561 561
562int ehea_create_busmap(void) 562static inline int ehea_calc_index(unsigned long i, unsigned long s)
563{ 563{
564 u64 vaddr = EHEA_BUSMAP_START; 564 return (i >> s) & EHEA_INDEX_MASK;
565 unsigned long high_section_index = 0; 565}
566 int i;
567 566
568 /* 567static inline int ehea_init_top_bmap(struct ehea_top_bmap *ehea_top_bmap,
569 * Sections are not in ascending order -> Loop over all sections and 568 int dir)
570 * find the highest PFN to compute the required map size. 569{
 571 */ 570 if (!ehea_top_bmap->dir[dir]) {
572 ehea_bmap.valid_sections = 0; 571 ehea_top_bmap->dir[dir] =
572 kzalloc(sizeof(struct ehea_dir_bmap), GFP_KERNEL);
573 if (!ehea_top_bmap->dir[dir])
574 return -ENOMEM;
575 }
576 return 0;
577}
573 578
574 for (i = 0; i < NR_MEM_SECTIONS; i++) 579static inline int ehea_init_bmap(struct ehea_bmap *ehea_bmap, int top, int dir)
575 if (valid_section_nr(i)) 580{
 576 high_section_index = i; 581 if (!ehea_bmap->top[top]) {
582 ehea_bmap->top[top] =
583 kzalloc(sizeof(struct ehea_top_bmap), GFP_KERNEL);
584 if (!ehea_bmap->top[top])
585 return -ENOMEM;
586 }
587 return ehea_init_top_bmap(ehea_bmap->top[top], dir);
588}
577 589
578 ehea_bmap.entries = high_section_index + 1; 590static int ehea_create_busmap_callback(unsigned long pfn,
579 ehea_bmap.vaddr = vmalloc(ehea_bmap.entries * sizeof(*ehea_bmap.vaddr)); 591 unsigned long nr_pages, void *arg)
592{
593 unsigned long i, mr_len, start_section, end_section;
594 start_section = (pfn * PAGE_SIZE) / EHEA_SECTSIZE;
595 end_section = start_section + ((nr_pages * PAGE_SIZE) / EHEA_SECTSIZE);
596 mr_len = *(unsigned long *)arg;
580 597
581 if (!ehea_bmap.vaddr) 598 ehea_bmap = kzalloc(sizeof(struct ehea_bmap), GFP_KERNEL);
599 if (!ehea_bmap)
582 return -ENOMEM; 600 return -ENOMEM;
583 601
584 for (i = 0 ; i < ehea_bmap.entries; i++) { 602 for (i = start_section; i < end_section; i++) {
585 unsigned long pfn = section_nr_to_pfn(i); 603 int ret;
604 int top, dir, idx;
605 u64 vaddr;
606
607 top = ehea_calc_index(i, EHEA_TOP_INDEX_SHIFT);
608 dir = ehea_calc_index(i, EHEA_DIR_INDEX_SHIFT);
609
610 ret = ehea_init_bmap(ehea_bmap, top, dir);
 611 if (ret)
612 return ret;
586 613
587 if (pfn_valid(pfn)) { 614 idx = i & EHEA_INDEX_MASK;
588 ehea_bmap.vaddr[i] = vaddr; 615 vaddr = EHEA_BUSMAP_START + mr_len + i * EHEA_SECTSIZE;
589 vaddr += EHEA_SECTSIZE; 616
590 ehea_bmap.valid_sections++; 617 ehea_bmap->top[top]->dir[dir]->ent[idx] = vaddr;
591 } else
592 ehea_bmap.vaddr[i] = 0;
593 } 618 }
594 619
620 mr_len += nr_pages * PAGE_SIZE;
621 *(unsigned long *)arg = mr_len;
622
595 return 0; 623 return 0;
596} 624}
597 625
626static unsigned long ehea_mr_len;
627
628static DEFINE_MUTEX(ehea_busmap_mutex);
629
630int ehea_create_busmap(void)
631{
632 int ret;
633 mutex_lock(&ehea_busmap_mutex);
634 ehea_mr_len = 0;
635 ret = walk_memory_resource(0, 1ULL << MAX_PHYSMEM_BITS, &ehea_mr_len,
636 ehea_create_busmap_callback);
637 mutex_unlock(&ehea_busmap_mutex);
638 return ret;
639}
640
598void ehea_destroy_busmap(void) 641void ehea_destroy_busmap(void)
599{ 642{
600 vfree(ehea_bmap.vaddr); 643 int top, dir;
644 mutex_lock(&ehea_busmap_mutex);
645 if (!ehea_bmap)
646 goto out_destroy;
647
648 for (top = 0; top < EHEA_MAP_ENTRIES; top++) {
649 if (!ehea_bmap->top[top])
650 continue;
651
652 for (dir = 0; dir < EHEA_MAP_ENTRIES; dir++) {
653 if (!ehea_bmap->top[top]->dir[dir])
654 continue;
655
656 kfree(ehea_bmap->top[top]->dir[dir]);
657 }
658
659 kfree(ehea_bmap->top[top]);
660 }
661
662 kfree(ehea_bmap);
663 ehea_bmap = NULL;
664out_destroy:
665 mutex_unlock(&ehea_busmap_mutex);
601} 666}
602 667
603u64 ehea_map_vaddr(void *caddr) 668u64 ehea_map_vaddr(void *caddr)
604{ 669{
605 u64 mapped_addr; 670 int top, dir, idx;
606 unsigned long index = __pa(caddr) >> SECTION_SIZE_BITS; 671 unsigned long index, offset;
607 672
608 if (likely(index < ehea_bmap.entries)) { 673 if (!ehea_bmap)
-		mapped_addr = ehea_bmap.vaddr[index];
-		if (likely(mapped_addr))
-			mapped_addr |= (((unsigned long)caddr)
-					& (EHEA_SECTSIZE - 1));
-		else
-			mapped_addr = -1;
-	} else
-		mapped_addr = -1;
-
-	if (unlikely(mapped_addr == -1))
-		if (!test_and_set_bit(__EHEA_STOP_XFER, &ehea_driver_flags))
-			schedule_work(&ehea_rereg_mr_task);
-
-	return mapped_addr;
+		return EHEA_INVAL_ADDR;
+
+	index = virt_to_abs(caddr) >> SECTION_SIZE_BITS;
+	top = (index >> EHEA_TOP_INDEX_SHIFT) & EHEA_INDEX_MASK;
+	if (!ehea_bmap->top[top])
+		return EHEA_INVAL_ADDR;
+
+	dir = (index >> EHEA_DIR_INDEX_SHIFT) & EHEA_INDEX_MASK;
+	if (!ehea_bmap->top[top]->dir[dir])
+		return EHEA_INVAL_ADDR;
+
+	idx = index & EHEA_INDEX_MASK;
+	if (!ehea_bmap->top[top]->dir[dir]->ent[idx])
+		return EHEA_INVAL_ADDR;
+
+	offset = (unsigned long)caddr & (EHEA_SECTSIZE - 1);
+	return ehea_bmap->top[top]->dir[dir]->ent[idx] | offset;
+}
+
+static inline void *ehea_calc_sectbase(int top, int dir, int idx)
+{
+	unsigned long ret = idx;
+	ret |= dir << EHEA_DIR_INDEX_SHIFT;
+	ret |= top << EHEA_TOP_INDEX_SHIFT;
+	return abs_to_virt(ret << SECTION_SIZE_BITS);
+}
+
+static u64 ehea_reg_mr_section(int top, int dir, int idx, u64 *pt,
+			       struct ehea_adapter *adapter,
+			       struct ehea_mr *mr)
+{
+	void *pg;
+	u64 j, m, hret;
+	unsigned long k = 0;
+	u64 pt_abs = virt_to_abs(pt);
+
+	void *sectbase = ehea_calc_sectbase(top, dir, idx);
+
+	for (j = 0; j < (EHEA_PAGES_PER_SECTION / EHEA_MAX_RPAGE); j++) {
+
+		for (m = 0; m < EHEA_MAX_RPAGE; m++) {
+			pg = sectbase + ((k++) * EHEA_PAGESIZE);
+			pt[m] = virt_to_abs(pg);
+		}
+		hret = ehea_h_register_rpage_mr(adapter->handle, mr->handle, 0,
+						0, pt_abs, EHEA_MAX_RPAGE);
+
+		if ((hret != H_SUCCESS)
+		    && (hret != H_PAGE_REGISTERED)) {
+			ehea_h_free_resource(adapter->handle, mr->handle,
+					     FORCE_FREE);
+			ehea_error("register_rpage_mr failed");
+			return hret;
+		}
+	}
+	return hret;
+}
+
+static u64 ehea_reg_mr_sections(int top, int dir, u64 *pt,
+				struct ehea_adapter *adapter,
+				struct ehea_mr *mr)
+{
+	u64 hret = H_SUCCESS;
+	int idx;
+
+	for (idx = 0; idx < EHEA_MAP_ENTRIES; idx++) {
+		if (!ehea_bmap->top[top]->dir[dir]->ent[idx])
+			continue;
+
+		hret = ehea_reg_mr_section(top, dir, idx, pt, adapter, mr);
+		if ((hret != H_SUCCESS) && (hret != H_PAGE_REGISTERED))
+			return hret;
+	}
+	return hret;
+}
+
+static u64 ehea_reg_mr_dir_sections(int top, u64 *pt,
+				    struct ehea_adapter *adapter,
+				    struct ehea_mr *mr)
+{
+	u64 hret = H_SUCCESS;
+	int dir;
+
+	for (dir = 0; dir < EHEA_MAP_ENTRIES; dir++) {
+		if (!ehea_bmap->top[top]->dir[dir])
+			continue;
+
+		hret = ehea_reg_mr_sections(top, dir, pt, adapter, mr);
+		if ((hret != H_SUCCESS) && (hret != H_PAGE_REGISTERED))
+			return hret;
+	}
+	return hret;
+}
 }
 
 int ehea_reg_kernel_mr(struct ehea_adapter *adapter, struct ehea_mr *mr)
 {
 	int ret;
 	u64 *pt;
-	void *pg;
-	u64 hret, pt_abs, i, j, m, mr_len;
+	u64 hret;
 	u32 acc_ctrl = EHEA_MR_ACC_CTRL;
 
-	mr_len = ehea_bmap.valid_sections * EHEA_SECTSIZE;
+	unsigned long top;
 
 	pt = kzalloc(PAGE_SIZE, GFP_KERNEL);
 	if (!pt) {
 		ehea_error("no mem");
 		ret = -ENOMEM;
 		goto out;
 	}
-	pt_abs = virt_to_abs(pt);
 
-	hret = ehea_h_alloc_resource_mr(adapter->handle,
-					EHEA_BUSMAP_START, mr_len,
-					acc_ctrl, adapter->pd,
+	hret = ehea_h_alloc_resource_mr(adapter->handle, EHEA_BUSMAP_START,
+					ehea_mr_len, acc_ctrl, adapter->pd,
 					&mr->handle, &mr->lkey);
+
 	if (hret != H_SUCCESS) {
 		ehea_error("alloc_resource_mr failed");
 		ret = -EIO;
 		goto out;
 	}
 
-	for (i = 0 ; i < ehea_bmap.entries; i++)
-		if (ehea_bmap.vaddr[i]) {
-			void *sectbase = __va(i << SECTION_SIZE_BITS);
-			unsigned long k = 0;
-
-			for (j = 0; j < (EHEA_PAGES_PER_SECTION /
-					EHEA_MAX_RPAGE); j++) {
-
-				for (m = 0; m < EHEA_MAX_RPAGE; m++) {
-					pg = sectbase + ((k++) * EHEA_PAGESIZE);
-					pt[m] = virt_to_abs(pg);
-				}
-
-				hret = ehea_h_register_rpage_mr(adapter->handle,
-								mr->handle,
-								0, 0, pt_abs,
-								EHEA_MAX_RPAGE);
-				if ((hret != H_SUCCESS)
-				    && (hret != H_PAGE_REGISTERED)) {
-					ehea_h_free_resource(adapter->handle,
-							     mr->handle,
-							     FORCE_FREE);
-					ehea_error("register_rpage_mr failed");
-					ret = -EIO;
-					goto out;
-				}
-			}
-		}
+	if (!ehea_bmap) {
+		ehea_h_free_resource(adapter->handle, mr->handle, FORCE_FREE);
+		ehea_error("no busmap available");
+		ret = -EIO;
+		goto out;
+	}
+
+	for (top = 0; top < EHEA_MAP_ENTRIES; top++) {
+		if (!ehea_bmap->top[top])
+			continue;
+
+		hret = ehea_reg_mr_dir_sections(top, pt, adapter, mr);
+		if ((hret != H_PAGE_REGISTERED) && (hret != H_SUCCESS))
+			break;
+	}
 
 	if (hret != H_SUCCESS) {
 		ehea_h_free_resource(adapter->handle, mr->handle, FORCE_FREE);
diff --git a/drivers/net/fs_enet/mii-fec.c b/drivers/net/fs_enet/mii-fec.c
index ba75efc9f5b5..f0014cfbb275 100644
--- a/drivers/net/fs_enet/mii-fec.c
+++ b/drivers/net/fs_enet/mii-fec.c
@@ -194,7 +194,7 @@ static int __devinit fs_enet_mdio_probe(struct of_device *ofdev,
 
 	ret = of_address_to_resource(ofdev->node, 0, &res);
 	if (ret)
-		return ret;
+		goto out_res;
 
 	snprintf(new_bus->id, MII_BUS_ID_SIZE, "%x", res.start);
 
@@ -236,6 +236,7 @@ out_free_irqs:
 	kfree(new_bus->irq);
 out_unmap_regs:
 	iounmap(fec->fecp);
+out_res:
 out_fec:
 	kfree(fec);
 out_mii:
diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c
index 642dc633b444..393a0f175302 100644
--- a/drivers/net/gianfar.c
+++ b/drivers/net/gianfar.c
@@ -138,6 +138,7 @@ static int gfar_poll(struct napi_struct *napi, int budget);
 static void gfar_netpoll(struct net_device *dev);
 #endif
 int gfar_clean_rx_ring(struct net_device *dev, int rx_work_limit);
+static int gfar_clean_tx_ring(struct net_device *dev);
 static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb, int length);
 static void gfar_vlan_rx_register(struct net_device *netdev,
 		struct vlan_group *grp);
@@ -634,6 +635,8 @@ static void free_skb_resources(struct gfar_private *priv)
 			dev_kfree_skb_any(priv->tx_skbuff[i]);
 			priv->tx_skbuff[i] = NULL;
 		}
+
+		txbdp++;
 	}
 
 	kfree(priv->tx_skbuff);
@@ -1141,7 +1144,7 @@ static int gfar_close(struct net_device *dev)
 }
 
 /* Changes the mac address if the controller is not running. */
-int gfar_set_mac_address(struct net_device *dev)
+static int gfar_set_mac_address(struct net_device *dev)
 {
 	gfar_set_mac_for_addr(dev, 0, dev->dev_addr);
 
@@ -1260,7 +1263,7 @@ static void gfar_timeout(struct net_device *dev)
 }
 
 /* Interrupt Handler for Transmit complete */
-int gfar_clean_tx_ring(struct net_device *dev)
+static int gfar_clean_tx_ring(struct net_device *dev)
 {
 	struct txbd8 *bdp;
 	struct gfar_private *priv = netdev_priv(dev);
diff --git a/drivers/net/gianfar.h b/drivers/net/gianfar.h
index fd487be3993e..27f37c81e52c 100644
--- a/drivers/net/gianfar.h
+++ b/drivers/net/gianfar.h
@@ -782,5 +782,8 @@ extern void gfar_halt(struct net_device *dev);
 extern void gfar_phy_test(struct mii_bus *bus, struct phy_device *phydev,
 		int enable, u32 regnum, u32 read);
 void gfar_init_sysfs(struct net_device *dev);
+int gfar_local_mdio_write(struct gfar_mii __iomem *regs, int mii_id,
+			  int regnum, u16 value);
+int gfar_local_mdio_read(struct gfar_mii __iomem *regs, int mii_id, int regnum);
 
 #endif /* __GIANFAR_H */
diff --git a/drivers/net/gianfar_sysfs.c b/drivers/net/gianfar_sysfs.c
index 230878b94190..5116f68e01b9 100644
--- a/drivers/net/gianfar_sysfs.c
+++ b/drivers/net/gianfar_sysfs.c
@@ -103,10 +103,10 @@ static ssize_t gfar_set_rx_stash_size(struct device *dev,
 
 	spin_lock_irqsave(&priv->rxlock, flags);
 	if (length > priv->rx_buffer_size)
-		return count;
+		goto out;
 
 	if (length == priv->rx_stash_size)
-		return count;
+		goto out;
 
 	priv->rx_stash_size = length;
 
@@ -125,6 +125,7 @@ static ssize_t gfar_set_rx_stash_size(struct device *dev,
 
 	gfar_write(&priv->regs->attr, temp);
 
+out:
 	spin_unlock_irqrestore(&priv->rxlock, flags);
 
 	return count;
@@ -154,10 +155,10 @@ static ssize_t gfar_set_rx_stash_index(struct device *dev,
 
 	spin_lock_irqsave(&priv->rxlock, flags);
 	if (index > priv->rx_stash_size)
-		return count;
+		goto out;
 
 	if (index == priv->rx_stash_index)
-		return count;
+		goto out;
 
 	priv->rx_stash_index = index;
 
@@ -166,6 +167,7 @@ static ssize_t gfar_set_rx_stash_index(struct device *dev,
 	temp |= ATTRELI_EI(index);
 	gfar_write(&priv->regs->attreli, flags);
 
+out:
 	spin_unlock_irqrestore(&priv->rxlock, flags);
 
 	return count;
diff --git a/drivers/net/irda/nsc-ircc.c b/drivers/net/irda/nsc-ircc.c
index a873d2b315ca..a7714da7c283 100644
--- a/drivers/net/irda/nsc-ircc.c
+++ b/drivers/net/irda/nsc-ircc.c
@@ -100,7 +100,9 @@ static int nsc_ircc_probe_39x(nsc_chip_t *chip, chipio_t *info);
 static int nsc_ircc_init_108(nsc_chip_t *chip, chipio_t *info);
 static int nsc_ircc_init_338(nsc_chip_t *chip, chipio_t *info);
 static int nsc_ircc_init_39x(nsc_chip_t *chip, chipio_t *info);
+#ifdef CONFIG_PNP
 static int nsc_ircc_pnp_probe(struct pnp_dev *dev, const struct pnp_device_id *id);
+#endif
 
 /* These are the known NSC chips */
 static nsc_chip_t chips[] = {
@@ -156,9 +158,11 @@ static const struct pnp_device_id nsc_ircc_pnp_table[] = {
 MODULE_DEVICE_TABLE(pnp, nsc_ircc_pnp_table);
 
 static struct pnp_driver nsc_ircc_pnp_driver = {
+#ifdef CONFIG_PNP
 	.name = "nsc-ircc",
 	.id_table = nsc_ircc_pnp_table,
 	.probe = nsc_ircc_pnp_probe,
+#endif
 };
 
 /* Some prototypes */
@@ -916,6 +920,7 @@ static int nsc_ircc_probe_39x(nsc_chip_t *chip, chipio_t *info)
 	return 0;
 }
 
+#ifdef CONFIG_PNP
 /* PNP probing */
 static int nsc_ircc_pnp_probe(struct pnp_dev *dev, const struct pnp_device_id *id)
 {
@@ -952,6 +957,7 @@ static int nsc_ircc_pnp_probe(struct pnp_dev *dev, const struct pnp_device_id *i
 
 	return 0;
 }
+#endif
 
 /*
  * Function nsc_ircc_setup (info)
diff --git a/drivers/net/irda/smsc-ircc2.c b/drivers/net/irda/smsc-ircc2.c
index 1f26da761e9f..cfe0194fef71 100644
--- a/drivers/net/irda/smsc-ircc2.c
+++ b/drivers/net/irda/smsc-ircc2.c
@@ -376,6 +376,7 @@ MODULE_DEVICE_TABLE(pnp, smsc_ircc_pnp_table);
 
 static int pnp_driver_registered;
 
+#ifdef CONFIG_PNP
 static int __init smsc_ircc_pnp_probe(struct pnp_dev *dev,
 				      const struct pnp_device_id *dev_id)
 {
@@ -402,7 +403,9 @@ static struct pnp_driver smsc_ircc_pnp_driver = {
 	.id_table = smsc_ircc_pnp_table,
 	.probe = smsc_ircc_pnp_probe,
 };
-
+#else /* CONFIG_PNP */
+static struct pnp_driver smsc_ircc_pnp_driver;
+#endif
 
 /*******************************************************************************
  *
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index 2056cfc624dc..c36a03ae9bfb 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -450,7 +450,7 @@ static void macvlan_dellink(struct net_device *dev)
 	unregister_netdevice(dev);
 
 	if (list_empty(&port->vlans))
-		macvlan_port_destroy(dev);
+		macvlan_port_destroy(port->dev);
 }
 
 static struct rtnl_link_ops macvlan_link_ops __read_mostly = {
diff --git a/drivers/net/mlx4/mr.c b/drivers/net/mlx4/mr.c
index cb46446b2691..03a9abcce524 100644
--- a/drivers/net/mlx4/mr.c
+++ b/drivers/net/mlx4/mr.c
@@ -551,7 +551,7 @@ int mlx4_fmr_alloc(struct mlx4_dev *dev, u32 pd, u32 access, int max_pages,
 	u64 mtt_seg;
 	int err = -ENOMEM;
 
-	if (page_shift < 12 || page_shift >= 32)
+	if (page_shift < (ffs(dev->caps.page_size_cap) - 1) || page_shift >= 32)
 		return -EINVAL;
 
 	/* All MTTs must fit in the same page */
diff --git a/drivers/net/mv643xx_eth.c b/drivers/net/mv643xx_eth.c
index 381b36e5f64c..b7915cdcc6a5 100644
--- a/drivers/net/mv643xx_eth.c
+++ b/drivers/net/mv643xx_eth.c
@@ -91,6 +91,11 @@
  */
 #define PHY_ADDR_REG				0x0000
 #define SMI_REG					0x0004
+#define WINDOW_BASE(i)				(0x0200 + ((i) << 3))
+#define WINDOW_SIZE(i)				(0x0204 + ((i) << 3))
+#define WINDOW_REMAP_HIGH(i)			(0x0280 + ((i) << 2))
+#define WINDOW_BAR_ENABLE			0x0290
+#define WINDOW_PROTECT(i)			(0x0294 + ((i) << 4))
 
 /*
  * Per-port registers.
@@ -507,9 +512,23 @@ struct mv643xx_mib_counters {
 	u32 late_collision;
 };
 
+struct mv643xx_shared_private {
+	void __iomem *eth_base;
+
+	/* used to protect SMI_REG, which is shared across ports */
+	spinlock_t phy_lock;
+
+	u32 win_protect;
+
+	unsigned int t_clk;
+};
+
 struct mv643xx_private {
+	struct mv643xx_shared_private *shared;
 	int port_num;			/* User Ethernet port number */
 
+	struct mv643xx_shared_private *shared_smi;
+
 	u32 rx_sram_addr;		/* Base address of rx sram area */
 	u32 rx_sram_size;		/* Size of rx sram area */
 	u32 tx_sram_addr;		/* Base address of tx sram area */
@@ -614,19 +633,14 @@ static const struct ethtool_ops mv643xx_ethtool_ops;
 static char mv643xx_driver_name[] = "mv643xx_eth";
 static char mv643xx_driver_version[] = "1.0";
 
-static void __iomem *mv643xx_eth_base;
-
-/* used to protect SMI_REG, which is shared across ports */
-static DEFINE_SPINLOCK(mv643xx_eth_phy_lock);
-
 static inline u32 rdl(struct mv643xx_private *mp, int offset)
 {
-	return readl(mv643xx_eth_base + offset);
+	return readl(mp->shared->eth_base + offset);
 }
 
 static inline void wrl(struct mv643xx_private *mp, int offset, u32 data)
 {
-	writel(data, mv643xx_eth_base + offset);
+	writel(data, mp->shared->eth_base + offset);
 }
 
 /*
@@ -1119,7 +1133,6 @@ static irqreturn_t mv643xx_eth_int_handler(int irq, void *dev_id)
  *
  * INPUT:
  *	struct mv643xx_private *mp	Ethernet port
- *	unsigned int t_clk		t_clk of the MV-643xx chip in HZ units
  *	unsigned int delay		Delay in usec
  *
  * OUTPUT:
@@ -1130,10 +1143,10 @@ static irqreturn_t mv643xx_eth_int_handler(int irq, void *dev_id)
  *
  */
 static unsigned int eth_port_set_rx_coal(struct mv643xx_private *mp,
-					unsigned int t_clk, unsigned int delay)
+					 unsigned int delay)
 {
 	unsigned int port_num = mp->port_num;
-	unsigned int coal = ((t_clk / 1000000) * delay) / 64;
+	unsigned int coal = ((mp->shared->t_clk / 1000000) * delay) / 64;
 
 	/* Set RX Coalescing mechanism */
 	wrl(mp, SDMA_CONFIG_REG(port_num),
@@ -1158,7 +1171,6 @@ static unsigned int eth_port_set_rx_coal(struct mv643xx_private *mp,
  *
  * INPUT:
  *	struct mv643xx_private *mp	Ethernet port
- *	unsigned int t_clk		t_clk of the MV-643xx chip in HZ units
  *	unsigned int delay		Delay in uSeconds
  *
  * OUTPUT:
@@ -1169,9 +1181,9 @@ static unsigned int eth_port_set_rx_coal(struct mv643xx_private *mp,
  *
  */
 static unsigned int eth_port_set_tx_coal(struct mv643xx_private *mp,
-					unsigned int t_clk, unsigned int delay)
+					 unsigned int delay)
 {
-	unsigned int coal = ((t_clk / 1000000) * delay) / 64;
+	unsigned int coal = ((mp->shared->t_clk / 1000000) * delay) / 64;
 
 	/* Set TX Coalescing mechanism */
 	wrl(mp, TX_FIFO_URGENT_THRESHOLD_REG(mp->port_num), coal << 4);
@@ -1413,11 +1425,11 @@ static int mv643xx_eth_open(struct net_device *dev)
 
 #ifdef MV643XX_COAL
 	mp->rx_int_coal =
-		eth_port_set_rx_coal(mp, 133000000, MV643XX_RX_COAL);
+		eth_port_set_rx_coal(mp, MV643XX_RX_COAL);
 #endif
 
 	mp->tx_int_coal =
-		eth_port_set_tx_coal(mp, 133000000, MV643XX_TX_COAL);
+		eth_port_set_tx_coal(mp, MV643XX_TX_COAL);
 
 	/* Unmask phy and link status changes interrupts */
 	wrl(mp, INTERRUPT_EXTEND_MASK_REG(port_num), ETH_INT_UNMASK_ALL_EXT);
@@ -1827,6 +1839,11 @@ static int mv643xx_eth_probe(struct platform_device *pdev)
 		return -ENODEV;
 	}
 
+	if (pd->shared == NULL) {
+		printk(KERN_ERR "No mv643xx_eth_platform_data->shared\n");
+		return -ENODEV;
+	}
+
 	dev = alloc_etherdev(sizeof(struct mv643xx_private));
 	if (!dev)
 		return -ENOMEM;
@@ -1877,8 +1894,16 @@ static int mv643xx_eth_probe(struct platform_device *pdev)
 
 	spin_lock_init(&mp->lock);
 
+	mp->shared = platform_get_drvdata(pd->shared);
 	port_num = mp->port_num = pd->port_number;
 
+	if (mp->shared->win_protect)
+		wrl(mp, WINDOW_PROTECT(port_num), mp->shared->win_protect);
+
+	mp->shared_smi = mp->shared;
+	if (pd->shared_smi != NULL)
+		mp->shared_smi = platform_get_drvdata(pd->shared_smi);
+
 	/* set default config values */
 	eth_port_uc_addr_get(mp, dev->dev_addr);
 	mp->rx_ring_size = PORT_DEFAULT_RECEIVE_QUEUE_SIZE;
@@ -1983,30 +2008,91 @@ static int mv643xx_eth_remove(struct platform_device *pdev)
 	return 0;
 }
 
+static void mv643xx_eth_conf_mbus_windows(struct mv643xx_shared_private *msp,
+					  struct mbus_dram_target_info *dram)
+{
+	void __iomem *base = msp->eth_base;
+	u32 win_enable;
+	u32 win_protect;
+	int i;
+
+	for (i = 0; i < 6; i++) {
+		writel(0, base + WINDOW_BASE(i));
+		writel(0, base + WINDOW_SIZE(i));
+		if (i < 4)
+			writel(0, base + WINDOW_REMAP_HIGH(i));
+	}
+
+	win_enable = 0x3f;
+	win_protect = 0;
+
+	for (i = 0; i < dram->num_cs; i++) {
+		struct mbus_dram_window *cs = dram->cs + i;
+
+		writel((cs->base & 0xffff0000) |
+			(cs->mbus_attr << 8) |
+			dram->mbus_dram_target_id, base + WINDOW_BASE(i));
+		writel((cs->size - 1) & 0xffff0000, base + WINDOW_SIZE(i));
+
+		win_enable &= ~(1 << i);
+		win_protect |= 3 << (2 * i);
+	}
+
+	writel(win_enable, base + WINDOW_BAR_ENABLE);
+	msp->win_protect = win_protect;
+}
+
 static int mv643xx_eth_shared_probe(struct platform_device *pdev)
 {
 	static int mv643xx_version_printed = 0;
+	struct mv643xx_eth_shared_platform_data *pd = pdev->dev.platform_data;
+	struct mv643xx_shared_private *msp;
 	struct resource *res;
+	int ret;
 
 	if (!mv643xx_version_printed++)
 		printk(KERN_NOTICE "MV-643xx 10/100/1000 Ethernet Driver\n");
 
+	ret = -EINVAL;
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	if (res == NULL)
-		return -ENODEV;
+		goto out;
 
-	mv643xx_eth_base = ioremap(res->start, res->end - res->start + 1);
-	if (mv643xx_eth_base == NULL)
-		return -ENOMEM;
+	ret = -ENOMEM;
+	msp = kmalloc(sizeof(*msp), GFP_KERNEL);
+	if (msp == NULL)
+		goto out;
+	memset(msp, 0, sizeof(*msp));
+
+	msp->eth_base = ioremap(res->start, res->end - res->start + 1);
+	if (msp->eth_base == NULL)
+		goto out_free;
+
+	spin_lock_init(&msp->phy_lock);
+	msp->t_clk = (pd != NULL && pd->t_clk != 0) ? pd->t_clk : 133000000;
+
+	platform_set_drvdata(pdev, msp);
+
+	/*
+	 * (Re-)program MBUS remapping windows if we are asked to.
+	 */
+	if (pd != NULL && pd->dram != NULL)
+		mv643xx_eth_conf_mbus_windows(msp, pd->dram);
 
 	return 0;
 
+out_free:
+	kfree(msp);
+out:
+	return ret;
 }
 
 static int mv643xx_eth_shared_remove(struct platform_device *pdev)
 {
-	iounmap(mv643xx_eth_base);
-	mv643xx_eth_base = NULL;
+	struct mv643xx_shared_private *msp = platform_get_drvdata(pdev);
+
+	iounmap(msp->eth_base);
+	kfree(msp);
 
 	return 0;
 }
@@ -2906,15 +2992,16 @@ static void eth_port_reset(struct mv643xx_private *mp)
 static void eth_port_read_smi_reg(struct mv643xx_private *mp,
 				unsigned int phy_reg, unsigned int *value)
 {
+	void __iomem *smi_reg = mp->shared_smi->eth_base + SMI_REG;
 	int phy_addr = ethernet_phy_get(mp);
 	unsigned long flags;
 	int i;
 
 	/* the SMI register is a shared resource */
-	spin_lock_irqsave(&mv643xx_eth_phy_lock, flags);
+	spin_lock_irqsave(&mp->shared_smi->phy_lock, flags);
 
 	/* wait for the SMI register to become available */
-	for (i = 0; rdl(mp, SMI_REG) & ETH_SMI_BUSY; i++) {
+	for (i = 0; readl(smi_reg) & ETH_SMI_BUSY; i++) {
 		if (i == PHY_WAIT_ITERATIONS) {
 			printk("%s: PHY busy timeout\n", mp->dev->name);
 			goto out;
@@ -2922,11 +3009,11 @@ static void eth_port_read_smi_reg(struct mv643xx_private *mp,
 		udelay(PHY_WAIT_MICRO_SECONDS);
 	}
 
-	wrl(mp, SMI_REG,
-	    (phy_addr << 16) | (phy_reg << 21) | ETH_SMI_OPCODE_READ);
+	writel((phy_addr << 16) | (phy_reg << 21) | ETH_SMI_OPCODE_READ,
+		smi_reg);
 
 	/* now wait for the data to be valid */
-	for (i = 0; !(rdl(mp, SMI_REG) & ETH_SMI_READ_VALID); i++) {
+	for (i = 0; !(readl(smi_reg) & ETH_SMI_READ_VALID); i++) {
 		if (i == PHY_WAIT_ITERATIONS) {
 			printk("%s: PHY read timeout\n", mp->dev->name);
 			goto out;
@@ -2934,9 +3021,9 @@ static void eth_port_read_smi_reg(struct mv643xx_private *mp,
 		udelay(PHY_WAIT_MICRO_SECONDS);
 	}
 
-	*value = rdl(mp, SMI_REG) & 0xffff;
+	*value = readl(smi_reg) & 0xffff;
 out:
-	spin_unlock_irqrestore(&mv643xx_eth_phy_lock, flags);
+	spin_unlock_irqrestore(&mp->shared_smi->phy_lock, flags);
 }
 
 /*
@@ -2962,17 +3049,16 @@ out:
 static void eth_port_write_smi_reg(struct mv643xx_private *mp,
 				   unsigned int phy_reg, unsigned int value)
 {
-	int phy_addr;
-	int i;
+	void __iomem *smi_reg = mp->shared_smi->eth_base + SMI_REG;
+	int phy_addr = ethernet_phy_get(mp);
 	unsigned long flags;
-
-	phy_addr = ethernet_phy_get(mp);
+	int i;
 
 	/* the SMI register is a shared resource */
-	spin_lock_irqsave(&mv643xx_eth_phy_lock, flags);
+	spin_lock_irqsave(&mp->shared_smi->phy_lock, flags);
 
 	/* wait for the SMI register to become available */
-	for (i = 0; rdl(mp, SMI_REG) & ETH_SMI_BUSY; i++) {
+	for (i = 0; readl(smi_reg) & ETH_SMI_BUSY; i++) {
 		if (i == PHY_WAIT_ITERATIONS) {
 			printk("%s: PHY busy timeout\n", mp->dev->name);
 			goto out;
@@ -2980,10 +3066,10 @@ static void eth_port_write_smi_reg(struct mv643xx_private *mp,
 		udelay(PHY_WAIT_MICRO_SECONDS);
 	}
 
-	wrl(mp, SMI_REG, (phy_addr << 16) | (phy_reg << 21) |
-				ETH_SMI_OPCODE_WRITE | (value & 0xffff));
+	writel((phy_addr << 16) | (phy_reg << 21) |
+		ETH_SMI_OPCODE_WRITE | (value & 0xffff), smi_reg);
 out:
-	spin_unlock_irqrestore(&mv643xx_eth_phy_lock, flags);
+	spin_unlock_irqrestore(&mp->shared_smi->phy_lock, flags);
 }
 
 /*
diff --git a/drivers/net/myri10ge/myri10ge.c b/drivers/net/myri10ge/myri10ge.c
index ef63c8d2bd7e..c91b12ea26ad 100644
--- a/drivers/net/myri10ge/myri10ge.c
+++ b/drivers/net/myri10ge/myri10ge.c
@@ -144,11 +144,13 @@ struct myri10ge_tx_buf {
144 char *req_bytes; 144 char *req_bytes;
145 struct myri10ge_tx_buffer_state *info; 145 struct myri10ge_tx_buffer_state *info;
146 int mask; /* number of transmit slots -1 */ 146 int mask; /* number of transmit slots -1 */
147 int boundary; /* boundary transmits cannot cross */
148 int req ____cacheline_aligned; /* transmit slots submitted */ 147 int req ____cacheline_aligned; /* transmit slots submitted */
149 int pkt_start; /* packets started */ 148 int pkt_start; /* packets started */
149 int stop_queue;
150 int linearized;
150 int done ____cacheline_aligned; /* transmit slots completed */ 151 int done ____cacheline_aligned; /* transmit slots completed */
151 int pkt_done; /* packets completed */ 152 int pkt_done; /* packets completed */
153 int wake_queue;
152}; 154};
153 155
154struct myri10ge_rx_done { 156struct myri10ge_rx_done {
@@ -160,29 +162,50 @@ struct myri10ge_rx_done {
160 struct net_lro_desc lro_desc[MYRI10GE_MAX_LRO_DESCRIPTORS]; 162 struct net_lro_desc lro_desc[MYRI10GE_MAX_LRO_DESCRIPTORS];
161}; 163};
162 164
163struct myri10ge_priv { 165struct myri10ge_slice_netstats {
164 int running; /* running? */ 166 unsigned long rx_packets;
165 int csum_flag; /* rx_csums? */ 167 unsigned long tx_packets;
168 unsigned long rx_bytes;
169 unsigned long tx_bytes;
170 unsigned long rx_dropped;
171 unsigned long tx_dropped;
172};
173
174struct myri10ge_slice_state {
166 struct myri10ge_tx_buf tx; /* transmit ring */ 175 struct myri10ge_tx_buf tx; /* transmit ring */
167 struct myri10ge_rx_buf rx_small; 176 struct myri10ge_rx_buf rx_small;
168 struct myri10ge_rx_buf rx_big; 177 struct myri10ge_rx_buf rx_big;
169 struct myri10ge_rx_done rx_done; 178 struct myri10ge_rx_done rx_done;
179 struct net_device *dev;
180 struct napi_struct napi;
181 struct myri10ge_priv *mgp;
182 struct myri10ge_slice_netstats stats;
183 __be32 __iomem *irq_claim;
184 struct mcp_irq_data *fw_stats;
185 dma_addr_t fw_stats_bus;
186 int watchdog_tx_done;
187 int watchdog_tx_req;
188};
189
190struct myri10ge_priv {
191 struct myri10ge_slice_state ss;
192 int tx_boundary; /* boundary transmits cannot cross */
193 int running; /* running? */
194 int csum_flag; /* rx_csums? */
170 int small_bytes; 195 int small_bytes;
171 int big_bytes; 196 int big_bytes;
197 int max_intr_slots;
172 struct net_device *dev; 198 struct net_device *dev;
173 struct napi_struct napi;
174 struct net_device_stats stats; 199 struct net_device_stats stats;
200 spinlock_t stats_lock;
175 u8 __iomem *sram; 201 u8 __iomem *sram;
176 int sram_size; 202 int sram_size;
177 unsigned long board_span; 203 unsigned long board_span;
178 unsigned long iomem_base; 204 unsigned long iomem_base;
179 __be32 __iomem *irq_claim;
180 __be32 __iomem *irq_deassert; 205 __be32 __iomem *irq_deassert;
181 char *mac_addr_string; 206 char *mac_addr_string;
182 struct mcp_cmd_response *cmd; 207 struct mcp_cmd_response *cmd;
183 dma_addr_t cmd_bus; 208 dma_addr_t cmd_bus;
184 struct mcp_irq_data *fw_stats;
185 dma_addr_t fw_stats_bus;
186 struct pci_dev *pdev; 209 struct pci_dev *pdev;
187 int msi_enabled; 210 int msi_enabled;
188 u32 link_state; 211 u32 link_state;
@@ -191,20 +214,16 @@ struct myri10ge_priv {
191 __be32 __iomem *intr_coal_delay_ptr; 214 __be32 __iomem *intr_coal_delay_ptr;
192 int mtrr; 215 int mtrr;
193 int wc_enabled; 216 int wc_enabled;
194 int wake_queue;
195 int stop_queue;
196 int down_cnt; 217 int down_cnt;
197 wait_queue_head_t down_wq; 218 wait_queue_head_t down_wq;
198 struct work_struct watchdog_work; 219 struct work_struct watchdog_work;
199 struct timer_list watchdog_timer; 220 struct timer_list watchdog_timer;
200 int watchdog_tx_done;
201 int watchdog_tx_req;
202 int watchdog_pause;
203 int watchdog_resets; 221 int watchdog_resets;
204 int tx_linearized; 222 int watchdog_pause;
205 int pause; 223 int pause;
206 char *fw_name; 224 char *fw_name;
207 char eeprom_strings[MYRI10GE_EEPROM_STRINGS_SIZE]; 225 char eeprom_strings[MYRI10GE_EEPROM_STRINGS_SIZE];
226 char *product_code_string;
208 char fw_version[128]; 227 char fw_version[128];
209 int fw_ver_major; 228 int fw_ver_major;
210 int fw_ver_minor; 229 int fw_ver_minor;
@@ -228,58 +247,54 @@ static char *myri10ge_fw_aligned = "myri10ge_eth_z8e.dat";
228 247
229static char *myri10ge_fw_name = NULL; 248static char *myri10ge_fw_name = NULL;
230module_param(myri10ge_fw_name, charp, S_IRUGO | S_IWUSR); 249module_param(myri10ge_fw_name, charp, S_IRUGO | S_IWUSR);
231MODULE_PARM_DESC(myri10ge_fw_name, "Firmware image name\n"); 250MODULE_PARM_DESC(myri10ge_fw_name, "Firmware image name");
232 251
233static int myri10ge_ecrc_enable = 1; 252static int myri10ge_ecrc_enable = 1;
234module_param(myri10ge_ecrc_enable, int, S_IRUGO); 253module_param(myri10ge_ecrc_enable, int, S_IRUGO);
235MODULE_PARM_DESC(myri10ge_ecrc_enable, "Enable Extended CRC on PCI-E\n"); 254MODULE_PARM_DESC(myri10ge_ecrc_enable, "Enable Extended CRC on PCI-E");
236
237static int myri10ge_max_intr_slots = 1024;
238module_param(myri10ge_max_intr_slots, int, S_IRUGO);
239MODULE_PARM_DESC(myri10ge_max_intr_slots, "Interrupt queue slots\n");
240 255
241static int myri10ge_small_bytes = -1; /* -1 == auto */ 256static int myri10ge_small_bytes = -1; /* -1 == auto */
242module_param(myri10ge_small_bytes, int, S_IRUGO | S_IWUSR); 257module_param(myri10ge_small_bytes, int, S_IRUGO | S_IWUSR);
243MODULE_PARM_DESC(myri10ge_small_bytes, "Threshold of small packets\n"); 258MODULE_PARM_DESC(myri10ge_small_bytes, "Threshold of small packets");
244 259
245static int myri10ge_msi = 1; /* enable msi by default */ 260static int myri10ge_msi = 1; /* enable msi by default */
246module_param(myri10ge_msi, int, S_IRUGO | S_IWUSR); 261module_param(myri10ge_msi, int, S_IRUGO | S_IWUSR);
247MODULE_PARM_DESC(myri10ge_msi, "Enable Message Signalled Interrupts\n"); 262MODULE_PARM_DESC(myri10ge_msi, "Enable Message Signalled Interrupts");
248 263
249static int myri10ge_intr_coal_delay = 75; 264static int myri10ge_intr_coal_delay = 75;
250module_param(myri10ge_intr_coal_delay, int, S_IRUGO); 265module_param(myri10ge_intr_coal_delay, int, S_IRUGO);
251MODULE_PARM_DESC(myri10ge_intr_coal_delay, "Interrupt coalescing delay\n"); 266MODULE_PARM_DESC(myri10ge_intr_coal_delay, "Interrupt coalescing delay");
252 267
253static int myri10ge_flow_control = 1; 268static int myri10ge_flow_control = 1;
254module_param(myri10ge_flow_control, int, S_IRUGO); 269module_param(myri10ge_flow_control, int, S_IRUGO);
255MODULE_PARM_DESC(myri10ge_flow_control, "Pause parameter\n"); 270MODULE_PARM_DESC(myri10ge_flow_control, "Pause parameter");
256 271
257static int myri10ge_deassert_wait = 1; 272static int myri10ge_deassert_wait = 1;
258module_param(myri10ge_deassert_wait, int, S_IRUGO | S_IWUSR); 273module_param(myri10ge_deassert_wait, int, S_IRUGO | S_IWUSR);
259MODULE_PARM_DESC(myri10ge_deassert_wait, 274MODULE_PARM_DESC(myri10ge_deassert_wait,
260 "Wait when deasserting legacy interrupts\n"); 275 "Wait when deasserting legacy interrupts");
261 276
262static int myri10ge_force_firmware = 0; 277static int myri10ge_force_firmware = 0;
263module_param(myri10ge_force_firmware, int, S_IRUGO); 278module_param(myri10ge_force_firmware, int, S_IRUGO);
264MODULE_PARM_DESC(myri10ge_force_firmware, 279MODULE_PARM_DESC(myri10ge_force_firmware,
265 "Force firmware to assume aligned completions\n"); 280 "Force firmware to assume aligned completions");
266 281
267static int myri10ge_initial_mtu = MYRI10GE_MAX_ETHER_MTU - ETH_HLEN; 282static int myri10ge_initial_mtu = MYRI10GE_MAX_ETHER_MTU - ETH_HLEN;
268module_param(myri10ge_initial_mtu, int, S_IRUGO); 283module_param(myri10ge_initial_mtu, int, S_IRUGO);
269MODULE_PARM_DESC(myri10ge_initial_mtu, "Initial MTU\n"); 284MODULE_PARM_DESC(myri10ge_initial_mtu, "Initial MTU");
270 285
271static int myri10ge_napi_weight = 64; 286static int myri10ge_napi_weight = 64;
272module_param(myri10ge_napi_weight, int, S_IRUGO); 287module_param(myri10ge_napi_weight, int, S_IRUGO);
273MODULE_PARM_DESC(myri10ge_napi_weight, "Set NAPI weight\n"); 288MODULE_PARM_DESC(myri10ge_napi_weight, "Set NAPI weight");
274 289
275static int myri10ge_watchdog_timeout = 1; 290static int myri10ge_watchdog_timeout = 1;
276module_param(myri10ge_watchdog_timeout, int, S_IRUGO); 291module_param(myri10ge_watchdog_timeout, int, S_IRUGO);
277MODULE_PARM_DESC(myri10ge_watchdog_timeout, "Set watchdog timeout\n"); 292MODULE_PARM_DESC(myri10ge_watchdog_timeout, "Set watchdog timeout");
278 293
279static int myri10ge_max_irq_loops = 1048576; 294static int myri10ge_max_irq_loops = 1048576;
280module_param(myri10ge_max_irq_loops, int, S_IRUGO); 295module_param(myri10ge_max_irq_loops, int, S_IRUGO);
281MODULE_PARM_DESC(myri10ge_max_irq_loops, 296MODULE_PARM_DESC(myri10ge_max_irq_loops,
282 "Set stuck legacy IRQ detection threshold\n"); 297 "Set stuck legacy IRQ detection threshold");
283 298
284#define MYRI10GE_MSG_DEFAULT NETIF_MSG_LINK 299#define MYRI10GE_MSG_DEFAULT NETIF_MSG_LINK
285 300
@@ -289,21 +304,22 @@ MODULE_PARM_DESC(myri10ge_debug, "Debug level (0=none,...,16=all)");
289 304
290static int myri10ge_lro = 1; 305static int myri10ge_lro = 1;
291module_param(myri10ge_lro, int, S_IRUGO); 306module_param(myri10ge_lro, int, S_IRUGO);
292MODULE_PARM_DESC(myri10ge_lro, "Enable large receive offload\n"); 307MODULE_PARM_DESC(myri10ge_lro, "Enable large receive offload");
293 308
294static int myri10ge_lro_max_pkts = MYRI10GE_LRO_MAX_PKTS; 309static int myri10ge_lro_max_pkts = MYRI10GE_LRO_MAX_PKTS;
295module_param(myri10ge_lro_max_pkts, int, S_IRUGO); 310module_param(myri10ge_lro_max_pkts, int, S_IRUGO);
296MODULE_PARM_DESC(myri10ge_lro, "Number of LRO packets to be aggregated\n"); 311MODULE_PARM_DESC(myri10ge_lro_max_pkts,
312 "Number of LRO packets to be aggregated");
297 313
298static int myri10ge_fill_thresh = 256; 314static int myri10ge_fill_thresh = 256;
299module_param(myri10ge_fill_thresh, int, S_IRUGO | S_IWUSR); 315module_param(myri10ge_fill_thresh, int, S_IRUGO | S_IWUSR);
300MODULE_PARM_DESC(myri10ge_fill_thresh, "Number of empty rx slots allowed\n"); 316MODULE_PARM_DESC(myri10ge_fill_thresh, "Number of empty rx slots allowed");
301 317
302static int myri10ge_reset_recover = 1; 318static int myri10ge_reset_recover = 1;
303 319
304static int myri10ge_wcfifo = 0; 320static int myri10ge_wcfifo = 0;
305module_param(myri10ge_wcfifo, int, S_IRUGO); 321module_param(myri10ge_wcfifo, int, S_IRUGO);
306MODULE_PARM_DESC(myri10ge_wcfifo, "Enable WC Fifo when WC is enabled\n"); 322MODULE_PARM_DESC(myri10ge_wcfifo, "Enable WC Fifo when WC is enabled");
307 323
308#define MYRI10GE_FW_OFFSET 1024*1024 324#define MYRI10GE_FW_OFFSET 1024*1024
309#define MYRI10GE_HIGHPART_TO_U32(X) \ 325#define MYRI10GE_HIGHPART_TO_U32(X) \
@@ -359,8 +375,10 @@ myri10ge_send_cmd(struct myri10ge_priv *mgp, u32 cmd,
359 for (sleep_total = 0; 375 for (sleep_total = 0;
360 sleep_total < 1000 376 sleep_total < 1000
361 && response->result == htonl(MYRI10GE_NO_RESPONSE_RESULT); 377 && response->result == htonl(MYRI10GE_NO_RESPONSE_RESULT);
362 sleep_total += 10) 378 sleep_total += 10) {
363 udelay(10); 379 udelay(10);
380 mb();
381 }
364 } else { 382 } else {
365 /* use msleep for most command */ 383 /* use msleep for most command */
366 for (sleep_total = 0; 384 for (sleep_total = 0;
@@ -420,6 +438,10 @@ static int myri10ge_read_mac_addr(struct myri10ge_priv *mgp)
420 ptr += 1; 438 ptr += 1;
421 } 439 }
422 } 440 }
441 if (memcmp(ptr, "PC=", 3) == 0) {
442 ptr += 3;
443 mgp->product_code_string = ptr;
444 }
423 if (memcmp((const void *)ptr, "SN=", 3) == 0) { 445 if (memcmp((const void *)ptr, "SN=", 3) == 0) {
424 ptr += 3; 446 ptr += 3;
425 mgp->serial_number = simple_strtoul(ptr, &ptr, 10); 447 mgp->serial_number = simple_strtoul(ptr, &ptr, 10);
@@ -442,7 +464,7 @@ abort:
442static void myri10ge_dummy_rdma(struct myri10ge_priv *mgp, int enable) 464static void myri10ge_dummy_rdma(struct myri10ge_priv *mgp, int enable)
443{ 465{
444 char __iomem *submit; 466 char __iomem *submit;
445 __be32 buf[16]; 467 __be32 buf[16] __attribute__ ((__aligned__(8)));
446 u32 dma_low, dma_high; 468 u32 dma_low, dma_high;
447 int i; 469 int i;
448 470
@@ -609,13 +631,38 @@ static int myri10ge_adopt_running_firmware(struct myri10ge_priv *mgp)
609 return status; 631 return status;
610} 632}
611 633
634int myri10ge_get_firmware_capabilities(struct myri10ge_priv *mgp)
635{
636 struct myri10ge_cmd cmd;
637 int status;
638
639 /* probe for IPv6 TSO support */
640 mgp->features = NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_TSO;
641 status = myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_MAX_TSO6_HDR_SIZE,
642 &cmd, 0);
643 if (status == 0) {
644 mgp->max_tso6 = cmd.data0;
645 mgp->features |= NETIF_F_TSO6;
646 }
647
648 status = myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_RX_RING_SIZE, &cmd, 0);
649 if (status != 0) {
650 dev_err(&mgp->pdev->dev,
651 "failed MXGEFW_CMD_GET_RX_RING_SIZE\n");
652 return -ENXIO;
653 }
654
655 mgp->max_intr_slots = 2 * (cmd.data0 / sizeof(struct mcp_dma_addr));
656
657 return 0;
658}
659
612static int myri10ge_load_firmware(struct myri10ge_priv *mgp) 660static int myri10ge_load_firmware(struct myri10ge_priv *mgp)
613{ 661{
614 char __iomem *submit; 662 char __iomem *submit;
615 __be32 buf[16]; 663 __be32 buf[16] __attribute__ ((__aligned__(8)));
616 u32 dma_low, dma_high, size; 664 u32 dma_low, dma_high, size;
617 int status, i; 665 int status, i;
618 struct myri10ge_cmd cmd;
619 666
620 size = 0; 667 size = 0;
621 status = myri10ge_load_hotplug_firmware(mgp, &size); 668 status = myri10ge_load_hotplug_firmware(mgp, &size);
@@ -635,7 +682,7 @@ static int myri10ge_load_firmware(struct myri10ge_priv *mgp)
635 } 682 }
636 dev_info(&mgp->pdev->dev, 683 dev_info(&mgp->pdev->dev,
637 "Successfully adopted running firmware\n"); 684 "Successfully adopted running firmware\n");
638 if (mgp->tx.boundary == 4096) { 685 if (mgp->tx_boundary == 4096) {
639 dev_warn(&mgp->pdev->dev, 686 dev_warn(&mgp->pdev->dev,
640 "Using firmware currently running on NIC" 687 "Using firmware currently running on NIC"
641 ". For optimal\n"); 688 ". For optimal\n");
@@ -646,7 +693,9 @@ static int myri10ge_load_firmware(struct myri10ge_priv *mgp)
646 } 693 }
647 694
648 mgp->fw_name = "adopted"; 695 mgp->fw_name = "adopted";
649 mgp->tx.boundary = 2048; 696 mgp->tx_boundary = 2048;
697 myri10ge_dummy_rdma(mgp, 1);
698 status = myri10ge_get_firmware_capabilities(mgp);
650 return status; 699 return status;
651 } 700 }
652 701
@@ -681,26 +730,18 @@ static int myri10ge_load_firmware(struct myri10ge_priv *mgp)
681 msleep(1); 730 msleep(1);
682 mb(); 731 mb();
683 i = 0; 732 i = 0;
684 while (mgp->cmd->data != MYRI10GE_NO_CONFIRM_DATA && i < 20) { 733 while (mgp->cmd->data != MYRI10GE_NO_CONFIRM_DATA && i < 9) {
685 msleep(1); 734 msleep(1 << i);
686 i++; 735 i++;
687 } 736 }
688 if (mgp->cmd->data != MYRI10GE_NO_CONFIRM_DATA) { 737 if (mgp->cmd->data != MYRI10GE_NO_CONFIRM_DATA) {
689 dev_err(&mgp->pdev->dev, "handoff failed\n"); 738 dev_err(&mgp->pdev->dev, "handoff failed\n");
690 return -ENXIO; 739 return -ENXIO;
691 } 740 }
692 dev_info(&mgp->pdev->dev, "handoff confirmed\n");
693 myri10ge_dummy_rdma(mgp, 1); 741 myri10ge_dummy_rdma(mgp, 1);
742 status = myri10ge_get_firmware_capabilities(mgp);
694 743
695 /* probe for IPv6 TSO support */ 744 return status;
696 mgp->features = NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_TSO;
697 status = myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_MAX_TSO6_HDR_SIZE,
698 &cmd, 0);
699 if (status == 0) {
700 mgp->max_tso6 = cmd.data0;
701 mgp->features |= NETIF_F_TSO6;
702 }
703 return 0;
704} 745}
705 746
706static int myri10ge_update_mac_address(struct myri10ge_priv *mgp, u8 * addr) 747static int myri10ge_update_mac_address(struct myri10ge_priv *mgp, u8 * addr)
@@ -772,7 +813,7 @@ static int myri10ge_dma_test(struct myri10ge_priv *mgp, int test_type)
772 * transfers took to complete. 813 * transfers took to complete.
773 */ 814 */
774 815
775 len = mgp->tx.boundary; 816 len = mgp->tx_boundary;
776 817
777 cmd.data0 = MYRI10GE_LOWPART_TO_U32(dmatest_bus); 818 cmd.data0 = MYRI10GE_LOWPART_TO_U32(dmatest_bus);
778 cmd.data1 = MYRI10GE_HIGHPART_TO_U32(dmatest_bus); 819 cmd.data1 = MYRI10GE_HIGHPART_TO_U32(dmatest_bus);
@@ -834,17 +875,17 @@ static int myri10ge_reset(struct myri10ge_priv *mgp)
834 875
835 /* Now exchange information about interrupts */ 876 /* Now exchange information about interrupts */
836 877
837 bytes = myri10ge_max_intr_slots * sizeof(*mgp->rx_done.entry); 878 bytes = mgp->max_intr_slots * sizeof(*mgp->ss.rx_done.entry);
838 memset(mgp->rx_done.entry, 0, bytes); 879 memset(mgp->ss.rx_done.entry, 0, bytes);
839 cmd.data0 = (u32) bytes; 880 cmd.data0 = (u32) bytes;
840 status = myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_INTRQ_SIZE, &cmd, 0); 881 status = myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_INTRQ_SIZE, &cmd, 0);
841 cmd.data0 = MYRI10GE_LOWPART_TO_U32(mgp->rx_done.bus); 882 cmd.data0 = MYRI10GE_LOWPART_TO_U32(mgp->ss.rx_done.bus);
842 cmd.data1 = MYRI10GE_HIGHPART_TO_U32(mgp->rx_done.bus); 883 cmd.data1 = MYRI10GE_HIGHPART_TO_U32(mgp->ss.rx_done.bus);
843 status |= myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_INTRQ_DMA, &cmd, 0); 884 status |= myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_INTRQ_DMA, &cmd, 0);
844 885
845 status |= 886 status |=
846 myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_IRQ_ACK_OFFSET, &cmd, 0); 887 myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_IRQ_ACK_OFFSET, &cmd, 0);
847 mgp->irq_claim = (__iomem __be32 *) (mgp->sram + cmd.data0); 888 mgp->ss.irq_claim = (__iomem __be32 *) (mgp->sram + cmd.data0);
848 status |= myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_IRQ_DEASSERT_OFFSET, 889 status |= myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_IRQ_DEASSERT_OFFSET,
849 &cmd, 0); 890 &cmd, 0);
850 mgp->irq_deassert = (__iomem __be32 *) (mgp->sram + cmd.data0); 891 mgp->irq_deassert = (__iomem __be32 *) (mgp->sram + cmd.data0);
@@ -858,17 +899,17 @@ static int myri10ge_reset(struct myri10ge_priv *mgp)
858 } 899 }
859 put_be32(htonl(mgp->intr_coal_delay), mgp->intr_coal_delay_ptr); 900 put_be32(htonl(mgp->intr_coal_delay), mgp->intr_coal_delay_ptr);
860 901
861 memset(mgp->rx_done.entry, 0, bytes); 902 memset(mgp->ss.rx_done.entry, 0, bytes);
862 903
863 /* reset mcp/driver shared state back to 0 */ 904 /* reset mcp/driver shared state back to 0 */
864 mgp->tx.req = 0; 905 mgp->ss.tx.req = 0;
865 mgp->tx.done = 0; 906 mgp->ss.tx.done = 0;
866 mgp->tx.pkt_start = 0; 907 mgp->ss.tx.pkt_start = 0;
867 mgp->tx.pkt_done = 0; 908 mgp->ss.tx.pkt_done = 0;
868 mgp->rx_big.cnt = 0; 909 mgp->ss.rx_big.cnt = 0;
869 mgp->rx_small.cnt = 0; 910 mgp->ss.rx_small.cnt = 0;
870 mgp->rx_done.idx = 0; 911 mgp->ss.rx_done.idx = 0;
871 mgp->rx_done.cnt = 0; 912 mgp->ss.rx_done.cnt = 0;
872 mgp->link_changes = 0; 913 mgp->link_changes = 0;
873 status = myri10ge_update_mac_address(mgp, mgp->dev->dev_addr); 914 status = myri10ge_update_mac_address(mgp, mgp->dev->dev_addr);
874 myri10ge_change_pause(mgp, mgp->pause); 915 myri10ge_change_pause(mgp, mgp->pause);
@@ -1020,9 +1061,10 @@ myri10ge_unmap_rx_page(struct pci_dev *pdev,
1020 * page into an skb */ 1061 * page into an skb */
1021 1062
1022static inline int 1063static inline int
1023myri10ge_rx_done(struct myri10ge_priv *mgp, struct myri10ge_rx_buf *rx, 1064myri10ge_rx_done(struct myri10ge_slice_state *ss, struct myri10ge_rx_buf *rx,
1024 int bytes, int len, __wsum csum) 1065 int bytes, int len, __wsum csum)
1025{ 1066{
1067 struct myri10ge_priv *mgp = ss->mgp;
1026 struct sk_buff *skb; 1068 struct sk_buff *skb;
1027 struct skb_frag_struct rx_frags[MYRI10GE_MAX_FRAGS_PER_FRAME]; 1069 struct skb_frag_struct rx_frags[MYRI10GE_MAX_FRAGS_PER_FRAME];
1028 int i, idx, hlen, remainder; 1070 int i, idx, hlen, remainder;
@@ -1052,11 +1094,10 @@ myri10ge_rx_done(struct myri10ge_priv *mgp, struct myri10ge_rx_buf *rx,
1052 rx_frags[0].page_offset += MXGEFW_PAD; 1094 rx_frags[0].page_offset += MXGEFW_PAD;
1053 rx_frags[0].size -= MXGEFW_PAD; 1095 rx_frags[0].size -= MXGEFW_PAD;
1054 len -= MXGEFW_PAD; 1096 len -= MXGEFW_PAD;
1055 lro_receive_frags(&mgp->rx_done.lro_mgr, rx_frags, 1097 lro_receive_frags(&ss->rx_done.lro_mgr, rx_frags,
1056 len, len, 1098 len, len,
1057 /* opaque, will come back in get_frag_header */ 1099 /* opaque, will come back in get_frag_header */
1058 (void *)(__force unsigned long)csum, 1100 (void *)(__force unsigned long)csum, csum);
1059 csum);
1060 return 1; 1101 return 1;
1061 } 1102 }
1062 1103
@@ -1096,10 +1137,11 @@ myri10ge_rx_done(struct myri10ge_priv *mgp, struct myri10ge_rx_buf *rx,
1096 return 1; 1137 return 1;
1097} 1138}
1098 1139
1099static inline void myri10ge_tx_done(struct myri10ge_priv *mgp, int mcp_index) 1140static inline void
1141myri10ge_tx_done(struct myri10ge_slice_state *ss, int mcp_index)
1100{ 1142{
1101 struct pci_dev *pdev = mgp->pdev; 1143 struct pci_dev *pdev = ss->mgp->pdev;
1102 struct myri10ge_tx_buf *tx = &mgp->tx; 1144 struct myri10ge_tx_buf *tx = &ss->tx;
1103 struct sk_buff *skb; 1145 struct sk_buff *skb;
1104 int idx, len; 1146 int idx, len;
1105 1147
@@ -1117,8 +1159,8 @@ static inline void myri10ge_tx_done(struct myri10ge_priv *mgp, int mcp_index)
1117 len = pci_unmap_len(&tx->info[idx], len); 1159 len = pci_unmap_len(&tx->info[idx], len);
1118 pci_unmap_len_set(&tx->info[idx], len, 0); 1160 pci_unmap_len_set(&tx->info[idx], len, 0);
1119 if (skb) { 1161 if (skb) {
1120 mgp->stats.tx_bytes += skb->len; 1162 ss->stats.tx_bytes += skb->len;
1121 mgp->stats.tx_packets++; 1163 ss->stats.tx_packets++;
1122 dev_kfree_skb_irq(skb); 1164 dev_kfree_skb_irq(skb);
1123 if (len) 1165 if (len)
1124 pci_unmap_single(pdev, 1166 pci_unmap_single(pdev,
@@ -1134,16 +1176,18 @@ static inline void myri10ge_tx_done(struct myri10ge_priv *mgp, int mcp_index)
1134 } 1176 }
1135 } 1177 }
1136 /* start the queue if we've stopped it */ 1178 /* start the queue if we've stopped it */
1137 if (netif_queue_stopped(mgp->dev) 1179 if (netif_queue_stopped(ss->dev)
1138 && tx->req - tx->done < (tx->mask >> 1)) { 1180 && tx->req - tx->done < (tx->mask >> 1)) {
1139 mgp->wake_queue++; 1181 tx->wake_queue++;
1140 netif_wake_queue(mgp->dev); 1182 netif_wake_queue(ss->dev);
1141 } 1183 }
1142} 1184}
1143 1185
1144static inline int myri10ge_clean_rx_done(struct myri10ge_priv *mgp, int budget) 1186static inline int
1187myri10ge_clean_rx_done(struct myri10ge_slice_state *ss, int budget)
1145{ 1188{
1146 struct myri10ge_rx_done *rx_done = &mgp->rx_done; 1189 struct myri10ge_rx_done *rx_done = &ss->rx_done;
1190 struct myri10ge_priv *mgp = ss->mgp;
1147 unsigned long rx_bytes = 0; 1191 unsigned long rx_bytes = 0;
1148 unsigned long rx_packets = 0; 1192 unsigned long rx_packets = 0;
1149 unsigned long rx_ok; 1193 unsigned long rx_ok;
@@ -1159,40 +1203,40 @@ static inline int myri10ge_clean_rx_done(struct myri10ge_priv *mgp, int budget)
1159 rx_done->entry[idx].length = 0; 1203 rx_done->entry[idx].length = 0;
1160 checksum = csum_unfold(rx_done->entry[idx].checksum); 1204 checksum = csum_unfold(rx_done->entry[idx].checksum);
1161 if (length <= mgp->small_bytes) 1205 if (length <= mgp->small_bytes)
1162 rx_ok = myri10ge_rx_done(mgp, &mgp->rx_small, 1206 rx_ok = myri10ge_rx_done(ss, &ss->rx_small,
1163 mgp->small_bytes, 1207 mgp->small_bytes,
1164 length, checksum); 1208 length, checksum);
1165 else 1209 else
1166 rx_ok = myri10ge_rx_done(mgp, &mgp->rx_big, 1210 rx_ok = myri10ge_rx_done(ss, &ss->rx_big,
1167 mgp->big_bytes, 1211 mgp->big_bytes,
1168 length, checksum); 1212 length, checksum);
1169 rx_packets += rx_ok; 1213 rx_packets += rx_ok;
1170 rx_bytes += rx_ok * (unsigned long)length; 1214 rx_bytes += rx_ok * (unsigned long)length;
1171 cnt++; 1215 cnt++;
1172 idx = cnt & (myri10ge_max_intr_slots - 1); 1216 idx = cnt & (mgp->max_intr_slots - 1);
1173 work_done++; 1217 work_done++;
1174 } 1218 }
1175 rx_done->idx = idx; 1219 rx_done->idx = idx;
1176 rx_done->cnt = cnt; 1220 rx_done->cnt = cnt;
1177 mgp->stats.rx_packets += rx_packets; 1221 ss->stats.rx_packets += rx_packets;
1178 mgp->stats.rx_bytes += rx_bytes; 1222 ss->stats.rx_bytes += rx_bytes;
1179 1223
1180 if (myri10ge_lro) 1224 if (myri10ge_lro)
1181 lro_flush_all(&rx_done->lro_mgr); 1225 lro_flush_all(&rx_done->lro_mgr);
1182 1226
1183 /* restock receive rings if needed */ 1227 /* restock receive rings if needed */
1184 if (mgp->rx_small.fill_cnt - mgp->rx_small.cnt < myri10ge_fill_thresh) 1228 if (ss->rx_small.fill_cnt - ss->rx_small.cnt < myri10ge_fill_thresh)
1185 myri10ge_alloc_rx_pages(mgp, &mgp->rx_small, 1229 myri10ge_alloc_rx_pages(mgp, &ss->rx_small,
1186 mgp->small_bytes + MXGEFW_PAD, 0); 1230 mgp->small_bytes + MXGEFW_PAD, 0);
1187 if (mgp->rx_big.fill_cnt - mgp->rx_big.cnt < myri10ge_fill_thresh) 1231 if (ss->rx_big.fill_cnt - ss->rx_big.cnt < myri10ge_fill_thresh)
1188 myri10ge_alloc_rx_pages(mgp, &mgp->rx_big, mgp->big_bytes, 0); 1232 myri10ge_alloc_rx_pages(mgp, &ss->rx_big, mgp->big_bytes, 0);
1189 1233
1190 return work_done; 1234 return work_done;
1191} 1235}
1192 1236
1193static inline void myri10ge_check_statblock(struct myri10ge_priv *mgp) 1237static inline void myri10ge_check_statblock(struct myri10ge_priv *mgp)
1194{ 1238{
1195 struct mcp_irq_data *stats = mgp->fw_stats; 1239 struct mcp_irq_data *stats = mgp->ss.fw_stats;
1196 1240
1197 if (unlikely(stats->stats_updated)) { 1241 if (unlikely(stats->stats_updated)) {
1198 unsigned link_up = ntohl(stats->link_up); 1242 unsigned link_up = ntohl(stats->link_up);
@@ -1219,9 +1263,9 @@ static inline void myri10ge_check_statblock(struct myri10ge_priv *mgp)
1219 } 1263 }
1220 } 1264 }
1221 if (mgp->rdma_tags_available != 1265 if (mgp->rdma_tags_available !=
1222 ntohl(mgp->fw_stats->rdma_tags_available)) { 1266 ntohl(stats->rdma_tags_available)) {
1223 mgp->rdma_tags_available = 1267 mgp->rdma_tags_available =
1224 ntohl(mgp->fw_stats->rdma_tags_available); 1268 ntohl(stats->rdma_tags_available);
1225 printk(KERN_WARNING "myri10ge: %s: RDMA timed out! " 1269 printk(KERN_WARNING "myri10ge: %s: RDMA timed out! "
1226 "%d tags left\n", mgp->dev->name, 1270 "%d tags left\n", mgp->dev->name,
1227 mgp->rdma_tags_available); 1271 mgp->rdma_tags_available);
@@ -1234,26 +1278,27 @@ static inline void myri10ge_check_statblock(struct myri10ge_priv *mgp)
1234 1278
1235static int myri10ge_poll(struct napi_struct *napi, int budget) 1279static int myri10ge_poll(struct napi_struct *napi, int budget)
1236{ 1280{
1237 struct myri10ge_priv *mgp = 1281 struct myri10ge_slice_state *ss =
1238 container_of(napi, struct myri10ge_priv, napi); 1282 container_of(napi, struct myri10ge_slice_state, napi);
1239 struct net_device *netdev = mgp->dev; 1283 struct net_device *netdev = ss->mgp->dev;
1240 int work_done; 1284 int work_done;
1241 1285
1242 /* process as many rx events as NAPI will allow */ 1286 /* process as many rx events as NAPI will allow */
1243 work_done = myri10ge_clean_rx_done(mgp, budget); 1287 work_done = myri10ge_clean_rx_done(ss, budget);
1244 1288
1245 if (work_done < budget) { 1289 if (work_done < budget) {
1246 netif_rx_complete(netdev, napi); 1290 netif_rx_complete(netdev, napi);
1247 put_be32(htonl(3), mgp->irq_claim); 1291 put_be32(htonl(3), ss->irq_claim);
1248 } 1292 }
1249 return work_done; 1293 return work_done;
1250} 1294}
1251 1295
1252static irqreturn_t myri10ge_intr(int irq, void *arg) 1296static irqreturn_t myri10ge_intr(int irq, void *arg)
1253{ 1297{
1254 struct myri10ge_priv *mgp = arg; 1298 struct myri10ge_slice_state *ss = arg;
1255 struct mcp_irq_data *stats = mgp->fw_stats; 1299 struct myri10ge_priv *mgp = ss->mgp;
1256 struct myri10ge_tx_buf *tx = &mgp->tx; 1300 struct mcp_irq_data *stats = ss->fw_stats;
1301 struct myri10ge_tx_buf *tx = &ss->tx;
1257 u32 send_done_count; 1302 u32 send_done_count;
1258 int i; 1303 int i;
1259 1304
@@ -1264,7 +1309,7 @@ static irqreturn_t myri10ge_intr(int irq, void *arg)
1264 /* low bit indicates receives are present, so schedule 1309 /* low bit indicates receives are present, so schedule
1265 * napi poll handler */ 1310 * napi poll handler */
1266 if (stats->valid & 1) 1311 if (stats->valid & 1)
1267 netif_rx_schedule(mgp->dev, &mgp->napi); 1312 netif_rx_schedule(ss->dev, &ss->napi);
1268 1313
1269 if (!mgp->msi_enabled) { 1314 if (!mgp->msi_enabled) {
1270 put_be32(0, mgp->irq_deassert); 1315 put_be32(0, mgp->irq_deassert);
@@ -1281,7 +1326,7 @@ static irqreturn_t myri10ge_intr(int irq, void *arg)
1281 /* check for transmit completes and receives */ 1326 /* check for transmit completes and receives */
1282 send_done_count = ntohl(stats->send_done_count); 1327 send_done_count = ntohl(stats->send_done_count);
1283 if (send_done_count != tx->pkt_done) 1328 if (send_done_count != tx->pkt_done)
1284 myri10ge_tx_done(mgp, (int)send_done_count); 1329 myri10ge_tx_done(ss, (int)send_done_count);
1285 if (unlikely(i > myri10ge_max_irq_loops)) { 1330 if (unlikely(i > myri10ge_max_irq_loops)) {
1286 printk(KERN_WARNING "myri10ge: %s: irq stuck?\n", 1331 printk(KERN_WARNING "myri10ge: %s: irq stuck?\n",
1287 mgp->dev->name); 1332 mgp->dev->name);
@@ -1296,16 +1341,46 @@ static irqreturn_t myri10ge_intr(int irq, void *arg)
1296 1341
1297 myri10ge_check_statblock(mgp); 1342 myri10ge_check_statblock(mgp);
1298 1343
1299 put_be32(htonl(3), mgp->irq_claim + 1); 1344 put_be32(htonl(3), ss->irq_claim + 1);
1300 return (IRQ_HANDLED); 1345 return (IRQ_HANDLED);
1301} 1346}
1302 1347
1303static int 1348static int
1304myri10ge_get_settings(struct net_device *netdev, struct ethtool_cmd *cmd) 1349myri10ge_get_settings(struct net_device *netdev, struct ethtool_cmd *cmd)
1305{ 1350{
1351 struct myri10ge_priv *mgp = netdev_priv(netdev);
1352 char *ptr;
1353 int i;
1354
1306 cmd->autoneg = AUTONEG_DISABLE; 1355 cmd->autoneg = AUTONEG_DISABLE;
1307 cmd->speed = SPEED_10000; 1356 cmd->speed = SPEED_10000;
1308 cmd->duplex = DUPLEX_FULL; 1357 cmd->duplex = DUPLEX_FULL;
1358
1359 /*
1360 * parse the product code to deterimine the interface type
1361 * (CX4, XFP, Quad Ribbon Fiber) by looking at the character
1362 * after the 3rd dash in the driver's cached copy of the
1363 * EEPROM's product code string.
1364 */
1365 ptr = mgp->product_code_string;
1366 if (ptr == NULL) {
1367 printk(KERN_ERR "myri10ge: %s: Missing product code\n",
1368 netdev->name);
1369 return 0;
1370 }
1371 for (i = 0; i < 3; i++, ptr++) {
1372 ptr = strchr(ptr, '-');
1373 if (ptr == NULL) {
1374 printk(KERN_ERR "myri10ge: %s: Invalid product "
1375 "code %s\n", netdev->name,
1376 mgp->product_code_string);
1377 return 0;
1378 }
1379 }
1380 if (*ptr == 'R' || *ptr == 'Q') {
1381 /* We've found either an XFP or quad ribbon fiber */
1382 cmd->port = PORT_FIBRE;
1383 }
1309 return 0; 1384 return 0;
1310} 1385}
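For illustration, with a hypothetical product code string such as "10G-PCIE-8A-R" (the string itself is an assumption, not taken from this patch), the loop above advances past three dashes and leaves ptr at 'R', so the port is reported as PORT_FIBRE; any other character falls through and cmd->port keeps its default value.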
1311 1386
@@ -1324,6 +1399,7 @@ static int
1324myri10ge_get_coalesce(struct net_device *netdev, struct ethtool_coalesce *coal) 1399myri10ge_get_coalesce(struct net_device *netdev, struct ethtool_coalesce *coal)
1325{ 1400{
1326 struct myri10ge_priv *mgp = netdev_priv(netdev); 1401 struct myri10ge_priv *mgp = netdev_priv(netdev);
1402
1327 coal->rx_coalesce_usecs = mgp->intr_coal_delay; 1403 coal->rx_coalesce_usecs = mgp->intr_coal_delay;
1328 return 0; 1404 return 0;
1329} 1405}
@@ -1370,10 +1446,10 @@ myri10ge_get_ringparam(struct net_device *netdev,
1370{ 1446{
1371 struct myri10ge_priv *mgp = netdev_priv(netdev); 1447 struct myri10ge_priv *mgp = netdev_priv(netdev);
1372 1448
1373 ring->rx_mini_max_pending = mgp->rx_small.mask + 1; 1449 ring->rx_mini_max_pending = mgp->ss.rx_small.mask + 1;
1374 ring->rx_max_pending = mgp->rx_big.mask + 1; 1450 ring->rx_max_pending = mgp->ss.rx_big.mask + 1;
1375 ring->rx_jumbo_max_pending = 0; 1451 ring->rx_jumbo_max_pending = 0;
1376 ring->tx_max_pending = mgp->rx_small.mask + 1; 1452 ring->tx_max_pending = mgp->ss.rx_small.mask + 1;
1377 ring->rx_mini_pending = ring->rx_mini_max_pending; 1453 ring->rx_mini_pending = ring->rx_mini_max_pending;
1378 ring->rx_pending = ring->rx_max_pending; 1454 ring->rx_pending = ring->rx_max_pending;
1379 ring->rx_jumbo_pending = ring->rx_jumbo_max_pending; 1455 ring->rx_jumbo_pending = ring->rx_jumbo_max_pending;
@@ -1383,6 +1459,7 @@ myri10ge_get_ringparam(struct net_device *netdev,
1383static u32 myri10ge_get_rx_csum(struct net_device *netdev) 1459static u32 myri10ge_get_rx_csum(struct net_device *netdev)
1384{ 1460{
1385 struct myri10ge_priv *mgp = netdev_priv(netdev); 1461 struct myri10ge_priv *mgp = netdev_priv(netdev);
1462
1386 if (mgp->csum_flag) 1463 if (mgp->csum_flag)
1387 return 1; 1464 return 1;
1388 else 1465 else
@@ -1392,6 +1469,7 @@ static u32 myri10ge_get_rx_csum(struct net_device *netdev)
1392static int myri10ge_set_rx_csum(struct net_device *netdev, u32 csum_enabled) 1469static int myri10ge_set_rx_csum(struct net_device *netdev, u32 csum_enabled)
1393{ 1470{
1394 struct myri10ge_priv *mgp = netdev_priv(netdev); 1471 struct myri10ge_priv *mgp = netdev_priv(netdev);
1472
1395 if (csum_enabled) 1473 if (csum_enabled)
1396 mgp->csum_flag = MXGEFW_FLAGS_CKSUM; 1474 mgp->csum_flag = MXGEFW_FLAGS_CKSUM;
1397 else 1475 else
@@ -1411,7 +1489,7 @@ static int myri10ge_set_tso(struct net_device *netdev, u32 tso_enabled)
1411 return 0; 1489 return 0;
1412} 1490}
1413 1491
1414static const char myri10ge_gstrings_stats[][ETH_GSTRING_LEN] = { 1492static const char myri10ge_gstrings_main_stats[][ETH_GSTRING_LEN] = {
1415 "rx_packets", "tx_packets", "rx_bytes", "tx_bytes", "rx_errors", 1493 "rx_packets", "tx_packets", "rx_bytes", "tx_bytes", "rx_errors",
1416 "tx_errors", "rx_dropped", "tx_dropped", "multicast", "collisions", 1494 "tx_errors", "rx_dropped", "tx_dropped", "multicast", "collisions",
1417 "rx_length_errors", "rx_over_errors", "rx_crc_errors", 1495 "rx_length_errors", "rx_over_errors", "rx_crc_errors",
@@ -1421,28 +1499,39 @@ static const char myri10ge_gstrings_stats[][ETH_GSTRING_LEN] = {
1421 /* device-specific stats */ 1499 /* device-specific stats */
1422 "tx_boundary", "WC", "irq", "MSI", 1500 "tx_boundary", "WC", "irq", "MSI",
1423 "read_dma_bw_MBs", "write_dma_bw_MBs", "read_write_dma_bw_MBs", 1501 "read_dma_bw_MBs", "write_dma_bw_MBs", "read_write_dma_bw_MBs",
1424 "serial_number", "tx_pkt_start", "tx_pkt_done", 1502 "serial_number", "watchdog_resets",
1425 "tx_req", "tx_done", "rx_small_cnt", "rx_big_cnt",
1426 "wake_queue", "stop_queue", "watchdog_resets", "tx_linearized",
1427 "link_changes", "link_up", "dropped_link_overflow", 1503 "link_changes", "link_up", "dropped_link_overflow",
1428 "dropped_link_error_or_filtered", 1504 "dropped_link_error_or_filtered",
1429 "dropped_pause", "dropped_bad_phy", "dropped_bad_crc32", 1505 "dropped_pause", "dropped_bad_phy", "dropped_bad_crc32",
1430 "dropped_unicast_filtered", "dropped_multicast_filtered", 1506 "dropped_unicast_filtered", "dropped_multicast_filtered",
1431 "dropped_runt", "dropped_overrun", "dropped_no_small_buffer", 1507 "dropped_runt", "dropped_overrun", "dropped_no_small_buffer",
1432 "dropped_no_big_buffer", "LRO aggregated", "LRO flushed", 1508 "dropped_no_big_buffer"
1509};
1510
1511static const char myri10ge_gstrings_slice_stats[][ETH_GSTRING_LEN] = {
1512 "----------- slice ---------",
1513 "tx_pkt_start", "tx_pkt_done", "tx_req", "tx_done",
1514 "rx_small_cnt", "rx_big_cnt",
1515 "wake_queue", "stop_queue", "tx_linearized", "LRO aggregated",
1516 "LRO flushed",
1433 "LRO avg aggr", "LRO no_desc" 1517 "LRO avg aggr", "LRO no_desc"
1434}; 1518};
1435 1519
1436#define MYRI10GE_NET_STATS_LEN 21 1520#define MYRI10GE_NET_STATS_LEN 21
1437#define MYRI10GE_STATS_LEN ARRAY_SIZE(myri10ge_gstrings_stats) 1521#define MYRI10GE_MAIN_STATS_LEN ARRAY_SIZE(myri10ge_gstrings_main_stats)
1522#define MYRI10GE_SLICE_STATS_LEN ARRAY_SIZE(myri10ge_gstrings_slice_stats)
1438 1523
1439static void 1524static void
1440myri10ge_get_strings(struct net_device *netdev, u32 stringset, u8 * data) 1525myri10ge_get_strings(struct net_device *netdev, u32 stringset, u8 * data)
1441{ 1526{
1442 switch (stringset) { 1527 switch (stringset) {
1443 case ETH_SS_STATS: 1528 case ETH_SS_STATS:
1444 memcpy(data, *myri10ge_gstrings_stats, 1529 memcpy(data, *myri10ge_gstrings_main_stats,
1445 sizeof(myri10ge_gstrings_stats)); 1530 sizeof(myri10ge_gstrings_main_stats));
1531 data += sizeof(myri10ge_gstrings_main_stats);
1532 memcpy(data, *myri10ge_gstrings_slice_stats,
1533 sizeof(myri10ge_gstrings_slice_stats));
1534 data += sizeof(myri10ge_gstrings_slice_stats);
1446 break; 1535 break;
1447 } 1536 }
1448} 1537}
@@ -1451,7 +1540,7 @@ static int myri10ge_get_sset_count(struct net_device *netdev, int sset)
1451{ 1540{
1452 switch (sset) { 1541 switch (sset) {
1453 case ETH_SS_STATS: 1542 case ETH_SS_STATS:
1454 return MYRI10GE_STATS_LEN; 1543 return MYRI10GE_MAIN_STATS_LEN + MYRI10GE_SLICE_STATS_LEN;
1455 default: 1544 default:
1456 return -EOPNOTSUPP; 1545 return -EOPNOTSUPP;
1457 } 1546 }
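Note the invariant this creates: ETH_SS_STATS now reports MYRI10GE_MAIN_STATS_LEN + MYRI10GE_SLICE_STATS_LEN entries, so the per-slice values written by myri10ge_get_ethtool_stats() below (the data[i++] = 0 slot backing the "----------- slice ---------" marker, then the tx/rx/LRO counters) must stay in lockstep with myri10ge_gstrings_slice_stats, or every name/value pair shown by ethtool -S shifts out of alignment.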
@@ -1462,12 +1551,13 @@ myri10ge_get_ethtool_stats(struct net_device *netdev,
1462 struct ethtool_stats *stats, u64 * data) 1551 struct ethtool_stats *stats, u64 * data)
1463{ 1552{
1464 struct myri10ge_priv *mgp = netdev_priv(netdev); 1553 struct myri10ge_priv *mgp = netdev_priv(netdev);
1554 struct myri10ge_slice_state *ss;
1465 int i; 1555 int i;
1466 1556
1467 for (i = 0; i < MYRI10GE_NET_STATS_LEN; i++) 1557 for (i = 0; i < MYRI10GE_NET_STATS_LEN; i++)
1468 data[i] = ((unsigned long *)&mgp->stats)[i]; 1558 data[i] = ((unsigned long *)&mgp->stats)[i];
1469 1559
1470 data[i++] = (unsigned int)mgp->tx.boundary; 1560 data[i++] = (unsigned int)mgp->tx_boundary;
1471 data[i++] = (unsigned int)mgp->wc_enabled; 1561 data[i++] = (unsigned int)mgp->wc_enabled;
1472 data[i++] = (unsigned int)mgp->pdev->irq; 1562 data[i++] = (unsigned int)mgp->pdev->irq;
1473 data[i++] = (unsigned int)mgp->msi_enabled; 1563 data[i++] = (unsigned int)mgp->msi_enabled;
@@ -1475,40 +1565,44 @@ myri10ge_get_ethtool_stats(struct net_device *netdev,
1475 data[i++] = (unsigned int)mgp->write_dma; 1565 data[i++] = (unsigned int)mgp->write_dma;
1476 data[i++] = (unsigned int)mgp->read_write_dma; 1566 data[i++] = (unsigned int)mgp->read_write_dma;
1477 data[i++] = (unsigned int)mgp->serial_number; 1567 data[i++] = (unsigned int)mgp->serial_number;
1478 data[i++] = (unsigned int)mgp->tx.pkt_start;
1479 data[i++] = (unsigned int)mgp->tx.pkt_done;
1480 data[i++] = (unsigned int)mgp->tx.req;
1481 data[i++] = (unsigned int)mgp->tx.done;
1482 data[i++] = (unsigned int)mgp->rx_small.cnt;
1483 data[i++] = (unsigned int)mgp->rx_big.cnt;
1484 data[i++] = (unsigned int)mgp->wake_queue;
1485 data[i++] = (unsigned int)mgp->stop_queue;
1486 data[i++] = (unsigned int)mgp->watchdog_resets; 1568 data[i++] = (unsigned int)mgp->watchdog_resets;
1487 data[i++] = (unsigned int)mgp->tx_linearized;
1488 data[i++] = (unsigned int)mgp->link_changes; 1569 data[i++] = (unsigned int)mgp->link_changes;
1489 data[i++] = (unsigned int)ntohl(mgp->fw_stats->link_up); 1570
1490 data[i++] = (unsigned int)ntohl(mgp->fw_stats->dropped_link_overflow); 1571 /* firmware stats are useful only in the first slice */
1491 data[i++] = 1572 ss = &mgp->ss;
1492 (unsigned int)ntohl(mgp->fw_stats->dropped_link_error_or_filtered); 1573 data[i++] = (unsigned int)ntohl(ss->fw_stats->link_up);
1493 data[i++] = (unsigned int)ntohl(mgp->fw_stats->dropped_pause); 1574 data[i++] = (unsigned int)ntohl(ss->fw_stats->dropped_link_overflow);
1494 data[i++] = (unsigned int)ntohl(mgp->fw_stats->dropped_bad_phy);
1495 data[i++] = (unsigned int)ntohl(mgp->fw_stats->dropped_bad_crc32);
1496 data[i++] = 1575 data[i++] =
1497 (unsigned int)ntohl(mgp->fw_stats->dropped_unicast_filtered); 1576 (unsigned int)ntohl(ss->fw_stats->dropped_link_error_or_filtered);
1577 data[i++] = (unsigned int)ntohl(ss->fw_stats->dropped_pause);
1578 data[i++] = (unsigned int)ntohl(ss->fw_stats->dropped_bad_phy);
1579 data[i++] = (unsigned int)ntohl(ss->fw_stats->dropped_bad_crc32);
1580 data[i++] = (unsigned int)ntohl(ss->fw_stats->dropped_unicast_filtered);
1498 data[i++] = 1581 data[i++] =
1499 (unsigned int)ntohl(mgp->fw_stats->dropped_multicast_filtered); 1582 (unsigned int)ntohl(ss->fw_stats->dropped_multicast_filtered);
1500 data[i++] = (unsigned int)ntohl(mgp->fw_stats->dropped_runt); 1583 data[i++] = (unsigned int)ntohl(ss->fw_stats->dropped_runt);
1501 data[i++] = (unsigned int)ntohl(mgp->fw_stats->dropped_overrun); 1584 data[i++] = (unsigned int)ntohl(ss->fw_stats->dropped_overrun);
1502 data[i++] = (unsigned int)ntohl(mgp->fw_stats->dropped_no_small_buffer); 1585 data[i++] = (unsigned int)ntohl(ss->fw_stats->dropped_no_small_buffer);
1503 data[i++] = (unsigned int)ntohl(mgp->fw_stats->dropped_no_big_buffer); 1586 data[i++] = (unsigned int)ntohl(ss->fw_stats->dropped_no_big_buffer);
1504 data[i++] = mgp->rx_done.lro_mgr.stats.aggregated; 1587
1505 data[i++] = mgp->rx_done.lro_mgr.stats.flushed; 1588 data[i++] = 0;
1506 if (mgp->rx_done.lro_mgr.stats.flushed) 1589 data[i++] = (unsigned int)ss->tx.pkt_start;
1507 data[i++] = mgp->rx_done.lro_mgr.stats.aggregated / 1590 data[i++] = (unsigned int)ss->tx.pkt_done;
1508 mgp->rx_done.lro_mgr.stats.flushed; 1591 data[i++] = (unsigned int)ss->tx.req;
1592 data[i++] = (unsigned int)ss->tx.done;
1593 data[i++] = (unsigned int)ss->rx_small.cnt;
1594 data[i++] = (unsigned int)ss->rx_big.cnt;
1595 data[i++] = (unsigned int)ss->tx.wake_queue;
1596 data[i++] = (unsigned int)ss->tx.stop_queue;
1597 data[i++] = (unsigned int)ss->tx.linearized;
1598 data[i++] = ss->rx_done.lro_mgr.stats.aggregated;
1599 data[i++] = ss->rx_done.lro_mgr.stats.flushed;
1600 if (ss->rx_done.lro_mgr.stats.flushed)
1601 data[i++] = ss->rx_done.lro_mgr.stats.aggregated /
1602 ss->rx_done.lro_mgr.stats.flushed;
1509 else 1603 else
1510 data[i++] = 0; 1604 data[i++] = 0;
1511 data[i++] = mgp->rx_done.lro_mgr.stats.no_desc; 1605 data[i++] = ss->rx_done.lro_mgr.stats.no_desc;
1512} 1606}
1513 1607
1514static void myri10ge_set_msglevel(struct net_device *netdev, u32 value) 1608static void myri10ge_set_msglevel(struct net_device *netdev, u32 value)
@@ -1544,19 +1638,17 @@ static const struct ethtool_ops myri10ge_ethtool_ops = {
1544 .get_msglevel = myri10ge_get_msglevel 1638 .get_msglevel = myri10ge_get_msglevel
1545}; 1639};
1546 1640
1547static int myri10ge_allocate_rings(struct net_device *dev) 1641static int myri10ge_allocate_rings(struct myri10ge_slice_state *ss)
1548{ 1642{
1549 struct myri10ge_priv *mgp; 1643 struct myri10ge_priv *mgp = ss->mgp;
1550 struct myri10ge_cmd cmd; 1644 struct myri10ge_cmd cmd;
1645 struct net_device *dev = mgp->dev;
1551 int tx_ring_size, rx_ring_size; 1646 int tx_ring_size, rx_ring_size;
1552 int tx_ring_entries, rx_ring_entries; 1647 int tx_ring_entries, rx_ring_entries;
1553 int i, status; 1648 int i, status;
1554 size_t bytes; 1649 size_t bytes;
1555 1650
1556 mgp = netdev_priv(dev);
1557
1558 /* get ring sizes */ 1651 /* get ring sizes */
1559
1560 status = myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_SEND_RING_SIZE, &cmd, 0); 1652 status = myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_SEND_RING_SIZE, &cmd, 0);
1561 tx_ring_size = cmd.data0; 1653 tx_ring_size = cmd.data0;
1562 status |= myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_RX_RING_SIZE, &cmd, 0); 1654 status |= myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_RX_RING_SIZE, &cmd, 0);
@@ -1566,144 +1658,142 @@ static int myri10ge_allocate_rings(struct net_device *dev)
1566 1658
1567 tx_ring_entries = tx_ring_size / sizeof(struct mcp_kreq_ether_send); 1659 tx_ring_entries = tx_ring_size / sizeof(struct mcp_kreq_ether_send);
1568 rx_ring_entries = rx_ring_size / sizeof(struct mcp_dma_addr); 1660 rx_ring_entries = rx_ring_size / sizeof(struct mcp_dma_addr);
1569 mgp->tx.mask = tx_ring_entries - 1; 1661 ss->tx.mask = tx_ring_entries - 1;
1570 mgp->rx_small.mask = mgp->rx_big.mask = rx_ring_entries - 1; 1662 ss->rx_small.mask = ss->rx_big.mask = rx_ring_entries - 1;
1571 1663
1572 status = -ENOMEM; 1664 status = -ENOMEM;
1573 1665
1574 /* allocate the host shadow rings */ 1666 /* allocate the host shadow rings */
1575 1667
1576 bytes = 8 + (MYRI10GE_MAX_SEND_DESC_TSO + 4) 1668 bytes = 8 + (MYRI10GE_MAX_SEND_DESC_TSO + 4)
1577 * sizeof(*mgp->tx.req_list); 1669 * sizeof(*ss->tx.req_list);
1578 mgp->tx.req_bytes = kzalloc(bytes, GFP_KERNEL); 1670 ss->tx.req_bytes = kzalloc(bytes, GFP_KERNEL);
1579 if (mgp->tx.req_bytes == NULL) 1671 if (ss->tx.req_bytes == NULL)
1580 goto abort_with_nothing; 1672 goto abort_with_nothing;
1581 1673
1582 /* ensure req_list entries are aligned to 8 bytes */ 1674 /* ensure req_list entries are aligned to 8 bytes */
1583 mgp->tx.req_list = (struct mcp_kreq_ether_send *) 1675 ss->tx.req_list = (struct mcp_kreq_ether_send *)
1584 ALIGN((unsigned long)mgp->tx.req_bytes, 8); 1676 ALIGN((unsigned long)ss->tx.req_bytes, 8);
1585 1677
1586 bytes = rx_ring_entries * sizeof(*mgp->rx_small.shadow); 1678 bytes = rx_ring_entries * sizeof(*ss->rx_small.shadow);
1587 mgp->rx_small.shadow = kzalloc(bytes, GFP_KERNEL); 1679 ss->rx_small.shadow = kzalloc(bytes, GFP_KERNEL);
1588 if (mgp->rx_small.shadow == NULL) 1680 if (ss->rx_small.shadow == NULL)
1589 goto abort_with_tx_req_bytes; 1681 goto abort_with_tx_req_bytes;
1590 1682
1591 bytes = rx_ring_entries * sizeof(*mgp->rx_big.shadow); 1683 bytes = rx_ring_entries * sizeof(*ss->rx_big.shadow);
1592 mgp->rx_big.shadow = kzalloc(bytes, GFP_KERNEL); 1684 ss->rx_big.shadow = kzalloc(bytes, GFP_KERNEL);
1593 if (mgp->rx_big.shadow == NULL) 1685 if (ss->rx_big.shadow == NULL)
1594 goto abort_with_rx_small_shadow; 1686 goto abort_with_rx_small_shadow;
1595 1687
1596 /* allocate the host info rings */ 1688 /* allocate the host info rings */
1597 1689
1598 bytes = tx_ring_entries * sizeof(*mgp->tx.info); 1690 bytes = tx_ring_entries * sizeof(*ss->tx.info);
1599 mgp->tx.info = kzalloc(bytes, GFP_KERNEL); 1691 ss->tx.info = kzalloc(bytes, GFP_KERNEL);
1600 if (mgp->tx.info == NULL) 1692 if (ss->tx.info == NULL)
1601 goto abort_with_rx_big_shadow; 1693 goto abort_with_rx_big_shadow;
1602 1694
1603 bytes = rx_ring_entries * sizeof(*mgp->rx_small.info); 1695 bytes = rx_ring_entries * sizeof(*ss->rx_small.info);
1604 mgp->rx_small.info = kzalloc(bytes, GFP_KERNEL); 1696 ss->rx_small.info = kzalloc(bytes, GFP_KERNEL);
1605 if (mgp->rx_small.info == NULL) 1697 if (ss->rx_small.info == NULL)
1606 goto abort_with_tx_info; 1698 goto abort_with_tx_info;
1607 1699
1608 bytes = rx_ring_entries * sizeof(*mgp->rx_big.info); 1700 bytes = rx_ring_entries * sizeof(*ss->rx_big.info);
1609 mgp->rx_big.info = kzalloc(bytes, GFP_KERNEL); 1701 ss->rx_big.info = kzalloc(bytes, GFP_KERNEL);
1610 if (mgp->rx_big.info == NULL) 1702 if (ss->rx_big.info == NULL)
1611 goto abort_with_rx_small_info; 1703 goto abort_with_rx_small_info;
1612 1704
1613 /* Fill the receive rings */ 1705 /* Fill the receive rings */
1614 mgp->rx_big.cnt = 0; 1706 ss->rx_big.cnt = 0;
1615 mgp->rx_small.cnt = 0; 1707 ss->rx_small.cnt = 0;
1616 mgp->rx_big.fill_cnt = 0; 1708 ss->rx_big.fill_cnt = 0;
1617 mgp->rx_small.fill_cnt = 0; 1709 ss->rx_small.fill_cnt = 0;
1618 mgp->rx_small.page_offset = MYRI10GE_ALLOC_SIZE; 1710 ss->rx_small.page_offset = MYRI10GE_ALLOC_SIZE;
1619 mgp->rx_big.page_offset = MYRI10GE_ALLOC_SIZE; 1711 ss->rx_big.page_offset = MYRI10GE_ALLOC_SIZE;
1620 mgp->rx_small.watchdog_needed = 0; 1712 ss->rx_small.watchdog_needed = 0;
1621 mgp->rx_big.watchdog_needed = 0; 1713 ss->rx_big.watchdog_needed = 0;
1622 myri10ge_alloc_rx_pages(mgp, &mgp->rx_small, 1714 myri10ge_alloc_rx_pages(mgp, &ss->rx_small,
1623 mgp->small_bytes + MXGEFW_PAD, 0); 1715 mgp->small_bytes + MXGEFW_PAD, 0);
1624 1716
1625 if (mgp->rx_small.fill_cnt < mgp->rx_small.mask + 1) { 1717 if (ss->rx_small.fill_cnt < ss->rx_small.mask + 1) {
1626 printk(KERN_ERR "myri10ge: %s: alloced only %d small bufs\n", 1718 printk(KERN_ERR "myri10ge: %s: alloced only %d small bufs\n",
1627 dev->name, mgp->rx_small.fill_cnt); 1719 dev->name, ss->rx_small.fill_cnt);
1628 goto abort_with_rx_small_ring; 1720 goto abort_with_rx_small_ring;
1629 } 1721 }
1630 1722
1631 myri10ge_alloc_rx_pages(mgp, &mgp->rx_big, mgp->big_bytes, 0); 1723 myri10ge_alloc_rx_pages(mgp, &ss->rx_big, mgp->big_bytes, 0);
1632 if (mgp->rx_big.fill_cnt < mgp->rx_big.mask + 1) { 1724 if (ss->rx_big.fill_cnt < ss->rx_big.mask + 1) {
1633 printk(KERN_ERR "myri10ge: %s: alloced only %d big bufs\n", 1725 printk(KERN_ERR "myri10ge: %s: alloced only %d big bufs\n",
1634 dev->name, mgp->rx_big.fill_cnt); 1726 dev->name, ss->rx_big.fill_cnt);
1635 goto abort_with_rx_big_ring; 1727 goto abort_with_rx_big_ring;
1636 } 1728 }
1637 1729
1638 return 0; 1730 return 0;
1639 1731
1640abort_with_rx_big_ring: 1732abort_with_rx_big_ring:
1641 for (i = mgp->rx_big.cnt; i < mgp->rx_big.fill_cnt; i++) { 1733 for (i = ss->rx_big.cnt; i < ss->rx_big.fill_cnt; i++) {
1642 int idx = i & mgp->rx_big.mask; 1734 int idx = i & ss->rx_big.mask;
1643 myri10ge_unmap_rx_page(mgp->pdev, &mgp->rx_big.info[idx], 1735 myri10ge_unmap_rx_page(mgp->pdev, &ss->rx_big.info[idx],
1644 mgp->big_bytes); 1736 mgp->big_bytes);
1645 put_page(mgp->rx_big.info[idx].page); 1737 put_page(ss->rx_big.info[idx].page);
1646 } 1738 }
1647 1739
1648abort_with_rx_small_ring: 1740abort_with_rx_small_ring:
1649 for (i = mgp->rx_small.cnt; i < mgp->rx_small.fill_cnt; i++) { 1741 for (i = ss->rx_small.cnt; i < ss->rx_small.fill_cnt; i++) {
1650 int idx = i & mgp->rx_small.mask; 1742 int idx = i & ss->rx_small.mask;
1651 myri10ge_unmap_rx_page(mgp->pdev, &mgp->rx_small.info[idx], 1743 myri10ge_unmap_rx_page(mgp->pdev, &ss->rx_small.info[idx],
1652 mgp->small_bytes + MXGEFW_PAD); 1744 mgp->small_bytes + MXGEFW_PAD);
1653 put_page(mgp->rx_small.info[idx].page); 1745 put_page(ss->rx_small.info[idx].page);
1654 } 1746 }
1655 1747
1656 kfree(mgp->rx_big.info); 1748 kfree(ss->rx_big.info);
1657 1749
1658abort_with_rx_small_info: 1750abort_with_rx_small_info:
1659 kfree(mgp->rx_small.info); 1751 kfree(ss->rx_small.info);
1660 1752
1661abort_with_tx_info: 1753abort_with_tx_info:
1662 kfree(mgp->tx.info); 1754 kfree(ss->tx.info);
1663 1755
1664abort_with_rx_big_shadow: 1756abort_with_rx_big_shadow:
1665 kfree(mgp->rx_big.shadow); 1757 kfree(ss->rx_big.shadow);
1666 1758
1667abort_with_rx_small_shadow: 1759abort_with_rx_small_shadow:
1668 kfree(mgp->rx_small.shadow); 1760 kfree(ss->rx_small.shadow);
1669 1761
1670abort_with_tx_req_bytes: 1762abort_with_tx_req_bytes:
1671 kfree(mgp->tx.req_bytes); 1763 kfree(ss->tx.req_bytes);
1672 mgp->tx.req_bytes = NULL; 1764 ss->tx.req_bytes = NULL;
1673 mgp->tx.req_list = NULL; 1765 ss->tx.req_list = NULL;
1674 1766
1675abort_with_nothing: 1767abort_with_nothing:
1676 return status; 1768 return status;
1677} 1769}
1678 1770
1679static void myri10ge_free_rings(struct net_device *dev) 1771static void myri10ge_free_rings(struct myri10ge_slice_state *ss)
1680{ 1772{
1681 struct myri10ge_priv *mgp; 1773 struct myri10ge_priv *mgp = ss->mgp;
1682 struct sk_buff *skb; 1774 struct sk_buff *skb;
1683 struct myri10ge_tx_buf *tx; 1775 struct myri10ge_tx_buf *tx;
1684 int i, len, idx; 1776 int i, len, idx;
1685 1777
1686 mgp = netdev_priv(dev); 1778 for (i = ss->rx_big.cnt; i < ss->rx_big.fill_cnt; i++) {
1687 1779 idx = i & ss->rx_big.mask;
1688 for (i = mgp->rx_big.cnt; i < mgp->rx_big.fill_cnt; i++) { 1780 if (i == ss->rx_big.fill_cnt - 1)
1689 idx = i & mgp->rx_big.mask; 1781 ss->rx_big.info[idx].page_offset = MYRI10GE_ALLOC_SIZE;
1690 if (i == mgp->rx_big.fill_cnt - 1) 1782 myri10ge_unmap_rx_page(mgp->pdev, &ss->rx_big.info[idx],
1691 mgp->rx_big.info[idx].page_offset = MYRI10GE_ALLOC_SIZE;
1692 myri10ge_unmap_rx_page(mgp->pdev, &mgp->rx_big.info[idx],
1693 mgp->big_bytes); 1783 mgp->big_bytes);
1694 put_page(mgp->rx_big.info[idx].page); 1784 put_page(ss->rx_big.info[idx].page);
1695 } 1785 }
1696 1786
1697 for (i = mgp->rx_small.cnt; i < mgp->rx_small.fill_cnt; i++) { 1787 for (i = ss->rx_small.cnt; i < ss->rx_small.fill_cnt; i++) {
1698 idx = i & mgp->rx_small.mask; 1788 idx = i & ss->rx_small.mask;
1699 if (i == mgp->rx_small.fill_cnt - 1) 1789 if (i == ss->rx_small.fill_cnt - 1)
1700 mgp->rx_small.info[idx].page_offset = 1790 ss->rx_small.info[idx].page_offset =
1701 MYRI10GE_ALLOC_SIZE; 1791 MYRI10GE_ALLOC_SIZE;
1702 myri10ge_unmap_rx_page(mgp->pdev, &mgp->rx_small.info[idx], 1792 myri10ge_unmap_rx_page(mgp->pdev, &ss->rx_small.info[idx],
1703 mgp->small_bytes + MXGEFW_PAD); 1793 mgp->small_bytes + MXGEFW_PAD);
1704 put_page(mgp->rx_small.info[idx].page); 1794 put_page(ss->rx_small.info[idx].page);
1705 } 1795 }
1706 tx = &mgp->tx; 1796 tx = &ss->tx;
1707 while (tx->done != tx->req) { 1797 while (tx->done != tx->req) {
1708 idx = tx->done & tx->mask; 1798 idx = tx->done & tx->mask;
1709 skb = tx->info[idx].skb; 1799 skb = tx->info[idx].skb;
@@ -1714,7 +1804,7 @@ static void myri10ge_free_rings(struct net_device *dev)
1714 len = pci_unmap_len(&tx->info[idx], len); 1804 len = pci_unmap_len(&tx->info[idx], len);
1715 pci_unmap_len_set(&tx->info[idx], len, 0); 1805 pci_unmap_len_set(&tx->info[idx], len, 0);
1716 if (skb) { 1806 if (skb) {
1717 mgp->stats.tx_dropped++; 1807 ss->stats.tx_dropped++;
1718 dev_kfree_skb_any(skb); 1808 dev_kfree_skb_any(skb);
1719 if (len) 1809 if (len)
1720 pci_unmap_single(mgp->pdev, 1810 pci_unmap_single(mgp->pdev,
@@ -1729,19 +1819,19 @@ static void myri10ge_free_rings(struct net_device *dev)
1729 PCI_DMA_TODEVICE); 1819 PCI_DMA_TODEVICE);
1730 } 1820 }
1731 } 1821 }
1732 kfree(mgp->rx_big.info); 1822 kfree(ss->rx_big.info);
1733 1823
1734 kfree(mgp->rx_small.info); 1824 kfree(ss->rx_small.info);
1735 1825
1736 kfree(mgp->tx.info); 1826 kfree(ss->tx.info);
1737 1827
1738 kfree(mgp->rx_big.shadow); 1828 kfree(ss->rx_big.shadow);
1739 1829
1740 kfree(mgp->rx_small.shadow); 1830 kfree(ss->rx_small.shadow);
1741 1831
1742 kfree(mgp->tx.req_bytes); 1832 kfree(ss->tx.req_bytes);
1743 mgp->tx.req_bytes = NULL; 1833 ss->tx.req_bytes = NULL;
1744 mgp->tx.req_list = NULL; 1834 ss->tx.req_list = NULL;
1745} 1835}
1746 1836
1747static int myri10ge_request_irq(struct myri10ge_priv *mgp) 1837static int myri10ge_request_irq(struct myri10ge_priv *mgp)
@@ -1840,13 +1930,11 @@ myri10ge_get_frag_header(struct skb_frag_struct *frag, void **mac_hdr,
1840 1930
1841static int myri10ge_open(struct net_device *dev) 1931static int myri10ge_open(struct net_device *dev)
1842{ 1932{
1843 struct myri10ge_priv *mgp; 1933 struct myri10ge_priv *mgp = netdev_priv(dev);
1844 struct myri10ge_cmd cmd; 1934 struct myri10ge_cmd cmd;
1845 struct net_lro_mgr *lro_mgr; 1935 struct net_lro_mgr *lro_mgr;
1846 int status, big_pow2; 1936 int status, big_pow2;
1847 1937
1848 mgp = netdev_priv(dev);
1849
1850 if (mgp->running != MYRI10GE_ETH_STOPPED) 1938 if (mgp->running != MYRI10GE_ETH_STOPPED)
1851 return -EBUSY; 1939 return -EBUSY;
1852 1940
@@ -1883,16 +1971,16 @@ static int myri10ge_open(struct net_device *dev)
1883 /* get the lanai pointers to the send and receive rings */ 1971 /* get the lanai pointers to the send and receive rings */
1884 1972
1885 status |= myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_SEND_OFFSET, &cmd, 0); 1973 status |= myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_SEND_OFFSET, &cmd, 0);
1886 mgp->tx.lanai = 1974 mgp->ss.tx.lanai =
1887 (struct mcp_kreq_ether_send __iomem *)(mgp->sram + cmd.data0); 1975 (struct mcp_kreq_ether_send __iomem *)(mgp->sram + cmd.data0);
1888 1976
1889 status |= 1977 status |=
1890 myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_SMALL_RX_OFFSET, &cmd, 0); 1978 myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_SMALL_RX_OFFSET, &cmd, 0);
1891 mgp->rx_small.lanai = 1979 mgp->ss.rx_small.lanai =
1892 (struct mcp_kreq_ether_recv __iomem *)(mgp->sram + cmd.data0); 1980 (struct mcp_kreq_ether_recv __iomem *)(mgp->sram + cmd.data0);
1893 1981
1894 status |= myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_BIG_RX_OFFSET, &cmd, 0); 1982 status |= myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_BIG_RX_OFFSET, &cmd, 0);
1895 mgp->rx_big.lanai = 1983 mgp->ss.rx_big.lanai =
1896 (struct mcp_kreq_ether_recv __iomem *)(mgp->sram + cmd.data0); 1984 (struct mcp_kreq_ether_recv __iomem *)(mgp->sram + cmd.data0);
1897 1985
1898 if (status != 0) { 1986 if (status != 0) {
@@ -1904,15 +1992,15 @@ static int myri10ge_open(struct net_device *dev)
1904 } 1992 }
1905 1993
1906 if (myri10ge_wcfifo && mgp->wc_enabled) { 1994 if (myri10ge_wcfifo && mgp->wc_enabled) {
1907 mgp->tx.wc_fifo = (u8 __iomem *) mgp->sram + MXGEFW_ETH_SEND_4; 1995 mgp->ss.tx.wc_fifo = (u8 __iomem *) mgp->sram + MXGEFW_ETH_SEND_4;
1908 mgp->rx_small.wc_fifo = 1996 mgp->ss.rx_small.wc_fifo =
1909 (u8 __iomem *) mgp->sram + MXGEFW_ETH_RECV_SMALL; 1997 (u8 __iomem *) mgp->sram + MXGEFW_ETH_RECV_SMALL;
1910 mgp->rx_big.wc_fifo = 1998 mgp->ss.rx_big.wc_fifo =
1911 (u8 __iomem *) mgp->sram + MXGEFW_ETH_RECV_BIG; 1999 (u8 __iomem *) mgp->sram + MXGEFW_ETH_RECV_BIG;
1912 } else { 2000 } else {
1913 mgp->tx.wc_fifo = NULL; 2001 mgp->ss.tx.wc_fifo = NULL;
1914 mgp->rx_small.wc_fifo = NULL; 2002 mgp->ss.rx_small.wc_fifo = NULL;
1915 mgp->rx_big.wc_fifo = NULL; 2003 mgp->ss.rx_big.wc_fifo = NULL;
1916 } 2004 }
1917 2005
1918 /* Firmware needs the big buff size as a power of 2. Lie and 2006 /* Firmware needs the big buff size as a power of 2. Lie and
@@ -1929,7 +2017,7 @@ static int myri10ge_open(struct net_device *dev)
1929 mgp->big_bytes = big_pow2; 2017 mgp->big_bytes = big_pow2;
1930 } 2018 }
1931 2019
1932 status = myri10ge_allocate_rings(dev); 2020 status = myri10ge_allocate_rings(&mgp->ss);
1933 if (status != 0) 2021 if (status != 0)
1934 goto abort_with_irq; 2022 goto abort_with_irq;
1935 2023
@@ -1948,12 +2036,12 @@ static int myri10ge_open(struct net_device *dev)
1948 goto abort_with_rings; 2036 goto abort_with_rings;
1949 } 2037 }
1950 2038
1951 cmd.data0 = MYRI10GE_LOWPART_TO_U32(mgp->fw_stats_bus); 2039 cmd.data0 = MYRI10GE_LOWPART_TO_U32(mgp->ss.fw_stats_bus);
1952 cmd.data1 = MYRI10GE_HIGHPART_TO_U32(mgp->fw_stats_bus); 2040 cmd.data1 = MYRI10GE_HIGHPART_TO_U32(mgp->ss.fw_stats_bus);
1953 cmd.data2 = sizeof(struct mcp_irq_data); 2041 cmd.data2 = sizeof(struct mcp_irq_data);
1954 status = myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_STATS_DMA_V2, &cmd, 0); 2042 status = myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_STATS_DMA_V2, &cmd, 0);
1955 if (status == -ENOSYS) { 2043 if (status == -ENOSYS) {
1956 dma_addr_t bus = mgp->fw_stats_bus; 2044 dma_addr_t bus = mgp->ss.fw_stats_bus;
1957 bus += offsetof(struct mcp_irq_data, send_done_count); 2045 bus += offsetof(struct mcp_irq_data, send_done_count);
1958 cmd.data0 = MYRI10GE_LOWPART_TO_U32(bus); 2046 cmd.data0 = MYRI10GE_LOWPART_TO_U32(bus);
1959 cmd.data1 = MYRI10GE_HIGHPART_TO_U32(bus); 2047 cmd.data1 = MYRI10GE_HIGHPART_TO_U32(bus);
@@ -1974,20 +2062,20 @@ static int myri10ge_open(struct net_device *dev)
1974 mgp->link_state = ~0U; 2062 mgp->link_state = ~0U;
1975 mgp->rdma_tags_available = 15; 2063 mgp->rdma_tags_available = 15;
1976 2064
1977 lro_mgr = &mgp->rx_done.lro_mgr; 2065 lro_mgr = &mgp->ss.rx_done.lro_mgr;
1978 lro_mgr->dev = dev; 2066 lro_mgr->dev = dev;
1979 lro_mgr->features = LRO_F_NAPI; 2067 lro_mgr->features = LRO_F_NAPI;
1980 lro_mgr->ip_summed = CHECKSUM_COMPLETE; 2068 lro_mgr->ip_summed = CHECKSUM_COMPLETE;
1981 lro_mgr->ip_summed_aggr = CHECKSUM_UNNECESSARY; 2069 lro_mgr->ip_summed_aggr = CHECKSUM_UNNECESSARY;
1982 lro_mgr->max_desc = MYRI10GE_MAX_LRO_DESCRIPTORS; 2070 lro_mgr->max_desc = MYRI10GE_MAX_LRO_DESCRIPTORS;
1983 lro_mgr->lro_arr = mgp->rx_done.lro_desc; 2071 lro_mgr->lro_arr = mgp->ss.rx_done.lro_desc;
1984 lro_mgr->get_frag_header = myri10ge_get_frag_header; 2072 lro_mgr->get_frag_header = myri10ge_get_frag_header;
1985 lro_mgr->max_aggr = myri10ge_lro_max_pkts; 2073 lro_mgr->max_aggr = myri10ge_lro_max_pkts;
1986 lro_mgr->frag_align_pad = 2; 2074 lro_mgr->frag_align_pad = 2;
1987 if (lro_mgr->max_aggr > MAX_SKB_FRAGS) 2075 if (lro_mgr->max_aggr > MAX_SKB_FRAGS)
1988 lro_mgr->max_aggr = MAX_SKB_FRAGS; 2076 lro_mgr->max_aggr = MAX_SKB_FRAGS;
1989 2077
1990 napi_enable(&mgp->napi); /* must happen prior to any irq */ 2078 napi_enable(&mgp->ss.napi); /* must happen prior to any irq */
1991 2079
1992 status = myri10ge_send_cmd(mgp, MXGEFW_CMD_ETHERNET_UP, &cmd, 0); 2080 status = myri10ge_send_cmd(mgp, MXGEFW_CMD_ETHERNET_UP, &cmd, 0);
1993 if (status) { 2081 if (status) {
@@ -1996,8 +2084,8 @@ static int myri10ge_open(struct net_device *dev)
1996 goto abort_with_rings; 2084 goto abort_with_rings;
1997 } 2085 }
1998 2086
1999 mgp->wake_queue = 0; 2087 mgp->ss.tx.wake_queue = 0;
2000 mgp->stop_queue = 0; 2088 mgp->ss.tx.stop_queue = 0;
2001 mgp->running = MYRI10GE_ETH_RUNNING; 2089 mgp->running = MYRI10GE_ETH_RUNNING;
2002 mgp->watchdog_timer.expires = jiffies + myri10ge_watchdog_timeout * HZ; 2090 mgp->watchdog_timer.expires = jiffies + myri10ge_watchdog_timeout * HZ;
2003 add_timer(&mgp->watchdog_timer); 2091 add_timer(&mgp->watchdog_timer);
@@ -2005,7 +2093,7 @@ static int myri10ge_open(struct net_device *dev)
2005 return 0; 2093 return 0;
2006 2094
2007abort_with_rings: 2095abort_with_rings:
2008 myri10ge_free_rings(dev); 2096 myri10ge_free_rings(&mgp->ss);
2009 2097
2010abort_with_irq: 2098abort_with_irq:
2011 myri10ge_free_irq(mgp); 2099 myri10ge_free_irq(mgp);
@@ -2017,21 +2105,19 @@ abort_with_nothing:
2017 2105
2018static int myri10ge_close(struct net_device *dev) 2106static int myri10ge_close(struct net_device *dev)
2019{ 2107{
2020 struct myri10ge_priv *mgp; 2108 struct myri10ge_priv *mgp = netdev_priv(dev);
2021 struct myri10ge_cmd cmd; 2109 struct myri10ge_cmd cmd;
2022 int status, old_down_cnt; 2110 int status, old_down_cnt;
2023 2111
2024 mgp = netdev_priv(dev);
2025
2026 if (mgp->running != MYRI10GE_ETH_RUNNING) 2112 if (mgp->running != MYRI10GE_ETH_RUNNING)
2027 return 0; 2113 return 0;
2028 2114
2029 if (mgp->tx.req_bytes == NULL) 2115 if (mgp->ss.tx.req_bytes == NULL)
2030 return 0; 2116 return 0;
2031 2117
2032 del_timer_sync(&mgp->watchdog_timer); 2118 del_timer_sync(&mgp->watchdog_timer);
2033 mgp->running = MYRI10GE_ETH_STOPPING; 2119 mgp->running = MYRI10GE_ETH_STOPPING;
2034 napi_disable(&mgp->napi); 2120 napi_disable(&mgp->ss.napi);
2035 netif_carrier_off(dev); 2121 netif_carrier_off(dev);
2036 netif_stop_queue(dev); 2122 netif_stop_queue(dev);
2037 old_down_cnt = mgp->down_cnt; 2123 old_down_cnt = mgp->down_cnt;
@@ -2047,7 +2133,7 @@ static int myri10ge_close(struct net_device *dev)
2047 2133
2048 netif_tx_disable(dev); 2134 netif_tx_disable(dev);
2049 myri10ge_free_irq(mgp); 2135 myri10ge_free_irq(mgp);
2050 myri10ge_free_rings(dev); 2136 myri10ge_free_rings(&mgp->ss);
2051 2137
2052 mgp->running = MYRI10GE_ETH_STOPPED; 2138 mgp->running = MYRI10GE_ETH_STOPPED;
2053 return 0; 2139 return 0;
@@ -2143,7 +2229,7 @@ myri10ge_submit_req_wc(struct myri10ge_tx_buf *tx,
2143 2229
2144/* 2230/*
2145 * Transmit a packet. We need to split the packet so that a single 2231 * Transmit a packet. We need to split the packet so that a single
2146 * segment does not cross myri10ge->tx.boundary, so this makes segment 2232 * segment does not cross myri10ge->tx_boundary, so this makes segment
2147 * counting tricky. So rather than try to count segments up front, we 2233 * counting tricky. So rather than try to count segments up front, we
2148 * just give up if there are too few segments to hold a reasonably 2234 * just give up if there are too few segments to hold a reasonably
2149 * fragmented packet currently available. If we run 2235 * fragmented packet currently available. If we run
@@ -2154,8 +2240,9 @@ myri10ge_submit_req_wc(struct myri10ge_tx_buf *tx,
2154static int myri10ge_xmit(struct sk_buff *skb, struct net_device *dev) 2240static int myri10ge_xmit(struct sk_buff *skb, struct net_device *dev)
2155{ 2241{
2156 struct myri10ge_priv *mgp = netdev_priv(dev); 2242 struct myri10ge_priv *mgp = netdev_priv(dev);
2243 struct myri10ge_slice_state *ss;
2157 struct mcp_kreq_ether_send *req; 2244 struct mcp_kreq_ether_send *req;
2158 struct myri10ge_tx_buf *tx = &mgp->tx; 2245 struct myri10ge_tx_buf *tx;
2159 struct skb_frag_struct *frag; 2246 struct skb_frag_struct *frag;
2160 dma_addr_t bus; 2247 dma_addr_t bus;
2161 u32 low; 2248 u32 low;
@@ -2166,6 +2253,9 @@ static int myri10ge_xmit(struct sk_buff *skb, struct net_device *dev)
2166 int cum_len, seglen, boundary, rdma_count; 2253 int cum_len, seglen, boundary, rdma_count;
2167 u8 flags, odd_flag; 2254 u8 flags, odd_flag;
2168 2255
2256 /* always transmit through slot 0 */
2257 ss = &mgp->ss;
2258 tx = &ss->tx;
2169again: 2259again:
2170 req = tx->req_list; 2260 req = tx->req_list;
2171 avail = tx->mask - 1 - (tx->req - tx->done); 2261 avail = tx->mask - 1 - (tx->req - tx->done);
@@ -2180,7 +2270,7 @@ again:
2180 2270
2181 if ((unlikely(avail < max_segments))) { 2271 if ((unlikely(avail < max_segments))) {
2182 /* we are out of transmit resources */ 2272 /* we are out of transmit resources */
2183 mgp->stop_queue++; 2273 tx->stop_queue++;
2184 netif_stop_queue(dev); 2274 netif_stop_queue(dev);
2185 return 1; 2275 return 1;
2186 } 2276 }
@@ -2242,7 +2332,7 @@ again:
2242 if (skb_padto(skb, ETH_ZLEN)) { 2332 if (skb_padto(skb, ETH_ZLEN)) {
2243 /* The packet is gone, so we must 2333 /* The packet is gone, so we must
2244 * return 0 */ 2334 * return 0 */
2245 mgp->stats.tx_dropped += 1; 2335 ss->stats.tx_dropped += 1;
2246 return 0; 2336 return 0;
2247 } 2337 }
2248 /* adjust the len to account for the zero pad 2338 /* adjust the len to account for the zero pad
@@ -2284,7 +2374,7 @@ again:
2284 2374
2285 while (1) { 2375 while (1) {
2286 /* Break the SKB or Fragment up into pieces which 2376 /* Break the SKB or Fragment up into pieces which
2287 * do not cross mgp->tx.boundary */ 2377 * do not cross mgp->tx_boundary */
2288 low = MYRI10GE_LOWPART_TO_U32(bus); 2378 low = MYRI10GE_LOWPART_TO_U32(bus);
2289 high_swapped = htonl(MYRI10GE_HIGHPART_TO_U32(bus)); 2379 high_swapped = htonl(MYRI10GE_HIGHPART_TO_U32(bus));
2290 while (len) { 2380 while (len) {
@@ -2294,7 +2384,8 @@ again:
2294 if (unlikely(count == max_segments)) 2384 if (unlikely(count == max_segments))
2295 goto abort_linearize; 2385 goto abort_linearize;
2296 2386
2297 boundary = (low + tx->boundary) & ~(tx->boundary - 1); 2387 boundary =
2388 (low + mgp->tx_boundary) & ~(mgp->tx_boundary - 1);
2298 seglen = boundary - low; 2389 seglen = boundary - low;
2299 if (seglen > len) 2390 if (seglen > len)
2300 seglen = len; 2391 seglen = len;
@@ -2378,7 +2469,7 @@ again:
2378 myri10ge_submit_req_wc(tx, tx->req_list, count); 2469 myri10ge_submit_req_wc(tx, tx->req_list, count);
2379 tx->pkt_start++; 2470 tx->pkt_start++;
2380 if ((avail - count) < MXGEFW_MAX_SEND_DESC) { 2471 if ((avail - count) < MXGEFW_MAX_SEND_DESC) {
2381 mgp->stop_queue++; 2472 tx->stop_queue++;
2382 netif_stop_queue(dev); 2473 netif_stop_queue(dev);
2383 } 2474 }
2384 dev->trans_start = jiffies; 2475 dev->trans_start = jiffies;
@@ -2420,12 +2511,12 @@ abort_linearize:
2420 if (skb_linearize(skb)) 2511 if (skb_linearize(skb))
2421 goto drop; 2512 goto drop;
2422 2513
2423 mgp->tx_linearized++; 2514 tx->linearized++;
2424 goto again; 2515 goto again;
2425 2516
2426drop: 2517drop:
2427 dev_kfree_skb_any(skb); 2518 dev_kfree_skb_any(skb);
2428 mgp->stats.tx_dropped += 1; 2519 ss->stats.tx_dropped += 1;
2429 return 0; 2520 return 0;
2430 2521
2431} 2522}
@@ -2433,7 +2524,7 @@ drop:
2433static int myri10ge_sw_tso(struct sk_buff *skb, struct net_device *dev) 2524static int myri10ge_sw_tso(struct sk_buff *skb, struct net_device *dev)
2434{ 2525{
2435 struct sk_buff *segs, *curr; 2526 struct sk_buff *segs, *curr;
2436 struct myri10ge_priv *mgp = dev->priv; 2527 struct myri10ge_priv *mgp = netdev_priv(dev);
2437 int status; 2528 int status;
2438 2529
2439 segs = skb_gso_segment(skb, dev->features & ~NETIF_F_TSO6); 2530 segs = skb_gso_segment(skb, dev->features & ~NETIF_F_TSO6);
@@ -2473,14 +2564,13 @@ static struct net_device_stats *myri10ge_get_stats(struct net_device *dev)
2473 2564
2474static void myri10ge_set_multicast_list(struct net_device *dev) 2565static void myri10ge_set_multicast_list(struct net_device *dev)
2475{ 2566{
2567 struct myri10ge_priv *mgp = netdev_priv(dev);
2476 struct myri10ge_cmd cmd; 2568 struct myri10ge_cmd cmd;
2477 struct myri10ge_priv *mgp;
2478 struct dev_mc_list *mc_list; 2569 struct dev_mc_list *mc_list;
2479 __be32 data[2] = { 0, 0 }; 2570 __be32 data[2] = { 0, 0 };
2480 int err; 2571 int err;
2481 DECLARE_MAC_BUF(mac); 2572 DECLARE_MAC_BUF(mac);
2482 2573
2483 mgp = netdev_priv(dev);
2484 /* can be called from atomic contexts, 2574 /* can be called from atomic contexts,
2485 * pass 1 to force atomicity in myri10ge_send_cmd() */ 2575 * pass 1 to force atomicity in myri10ge_send_cmd() */
2486 myri10ge_change_promisc(mgp, dev->flags & IFF_PROMISC, 1); 2576 myri10ge_change_promisc(mgp, dev->flags & IFF_PROMISC, 1);
@@ -2616,13 +2706,14 @@ static void myri10ge_enable_ecrc(struct myri10ge_priv *mgp)
2616 ext_type = (val & PCI_EXP_FLAGS_TYPE) >> 4; 2706 ext_type = (val & PCI_EXP_FLAGS_TYPE) >> 4;
2617 if (ext_type != PCI_EXP_TYPE_ROOT_PORT) { 2707 if (ext_type != PCI_EXP_TYPE_ROOT_PORT) {
2618 if (myri10ge_ecrc_enable > 1) { 2708 if (myri10ge_ecrc_enable > 1) {
2619 struct pci_dev *old_bridge = bridge; 2709 struct pci_dev *prev_bridge, *old_bridge = bridge;
2620 2710
2621 /* Walk the hierarchy up to the root port 2711 /* Walk the hierarchy up to the root port
2622 * where ECRC has to be enabled */ 2712 * where ECRC has to be enabled */
2623 do { 2713 do {
2714 prev_bridge = bridge;
2624 bridge = bridge->bus->self; 2715 bridge = bridge->bus->self;
2625 if (!bridge) { 2716 if (!bridge || prev_bridge == bridge) {
2626 dev_err(dev, 2717 dev_err(dev,
2627 "Failed to find root port" 2718 "Failed to find root port"
2628 " to force ECRC\n"); 2719 " to force ECRC\n");
@@ -2681,9 +2772,9 @@ static void myri10ge_enable_ecrc(struct myri10ge_priv *mgp)
2681 * already been enabled, then it must use a firmware image which works 2772 * already been enabled, then it must use a firmware image which works
2682 * around unaligned completion packets (myri10ge_ethp_z8e.dat), and it 2773 * around unaligned completion packets (myri10ge_ethp_z8e.dat), and it
2683 * should also ensure that it never gives the device a Read-DMA which is 2774 * should also ensure that it never gives the device a Read-DMA which is
2684 * larger than 2KB by setting the tx.boundary to 2KB. If ECRC is 2775 * larger than 2KB by setting the tx_boundary to 2KB. If ECRC is
2685 * enabled, then the driver should use the aligned (myri10ge_eth_z8e.dat) 2776 * enabled, then the driver should use the aligned (myri10ge_eth_z8e.dat)
2686 * firmware image, and set tx.boundary to 4KB. 2777 * firmware image, and set tx_boundary to 4KB.
2687 */ 2778 */
2688 2779
2689static void myri10ge_firmware_probe(struct myri10ge_priv *mgp) 2780static void myri10ge_firmware_probe(struct myri10ge_priv *mgp)
@@ -2692,7 +2783,7 @@ static void myri10ge_firmware_probe(struct myri10ge_priv *mgp)
2692 struct device *dev = &pdev->dev; 2783 struct device *dev = &pdev->dev;
2693 int status; 2784 int status;
2694 2785
2695 mgp->tx.boundary = 4096; 2786 mgp->tx_boundary = 4096;
2696 /* 2787 /*
2697 * Verify the max read request size was set to 4KB 2788 * Verify the max read request size was set to 4KB
2698 * before trying the test with 4KB. 2789 * before trying the test with 4KB.
@@ -2704,7 +2795,7 @@ static void myri10ge_firmware_probe(struct myri10ge_priv *mgp)
2704 } 2795 }
2705 if (status != 4096) { 2796 if (status != 4096) {
2706 dev_warn(dev, "Max Read Request size != 4096 (%d)\n", status); 2797 dev_warn(dev, "Max Read Request size != 4096 (%d)\n", status);
2707 mgp->tx.boundary = 2048; 2798 mgp->tx_boundary = 2048;
2708 } 2799 }
2709 /* 2800 /*
2710 * load the optimized firmware (which assumes aligned PCIe 2801 * load the optimized firmware (which assumes aligned PCIe
@@ -2737,7 +2828,7 @@ static void myri10ge_firmware_probe(struct myri10ge_priv *mgp)
2737 "Please install up to date fw\n"); 2828 "Please install up to date fw\n");
2738abort: 2829abort:
2739 /* fall back to using the unaligned firmware */ 2830 /* fall back to using the unaligned firmware */
2740 mgp->tx.boundary = 2048; 2831 mgp->tx_boundary = 2048;
2741 mgp->fw_name = myri10ge_fw_unaligned; 2832 mgp->fw_name = myri10ge_fw_unaligned;
2742 2833
2743} 2834}
@@ -2758,7 +2849,7 @@ static void myri10ge_select_firmware(struct myri10ge_priv *mgp)
2758 if (link_width < 8) { 2849 if (link_width < 8) {
2759 dev_info(&mgp->pdev->dev, "PCIE x%d Link\n", 2850 dev_info(&mgp->pdev->dev, "PCIE x%d Link\n",
2760 link_width); 2851 link_width);
2761 mgp->tx.boundary = 4096; 2852 mgp->tx_boundary = 4096;
2762 mgp->fw_name = myri10ge_fw_aligned; 2853 mgp->fw_name = myri10ge_fw_aligned;
2763 } else { 2854 } else {
2764 myri10ge_firmware_probe(mgp); 2855 myri10ge_firmware_probe(mgp);
@@ -2767,12 +2858,12 @@ static void myri10ge_select_firmware(struct myri10ge_priv *mgp)
2767 if (myri10ge_force_firmware == 1) { 2858 if (myri10ge_force_firmware == 1) {
2768 dev_info(&mgp->pdev->dev, 2859 dev_info(&mgp->pdev->dev,
2769 "Assuming aligned completions (forced)\n"); 2860 "Assuming aligned completions (forced)\n");
2770 mgp->tx.boundary = 4096; 2861 mgp->tx_boundary = 4096;
2771 mgp->fw_name = myri10ge_fw_aligned; 2862 mgp->fw_name = myri10ge_fw_aligned;
2772 } else { 2863 } else {
2773 dev_info(&mgp->pdev->dev, 2864 dev_info(&mgp->pdev->dev,
2774 "Assuming unaligned completions (forced)\n"); 2865 "Assuming unaligned completions (forced)\n");
2775 mgp->tx.boundary = 2048; 2866 mgp->tx_boundary = 2048;
2776 mgp->fw_name = myri10ge_fw_unaligned; 2867 mgp->fw_name = myri10ge_fw_unaligned;
2777 } 2868 }
2778 } 2869 }
@@ -2889,6 +2980,7 @@ static void myri10ge_watchdog(struct work_struct *work)
2889{ 2980{
2890 struct myri10ge_priv *mgp = 2981 struct myri10ge_priv *mgp =
2891 container_of(work, struct myri10ge_priv, watchdog_work); 2982 container_of(work, struct myri10ge_priv, watchdog_work);
2983 struct myri10ge_tx_buf *tx;
2892 u32 reboot; 2984 u32 reboot;
2893 int status; 2985 int status;
2894 u16 cmd, vendor; 2986 u16 cmd, vendor;
@@ -2938,15 +3030,16 @@ static void myri10ge_watchdog(struct work_struct *work)
2938 3030
2939 printk(KERN_ERR "myri10ge: %s: device timeout, resetting\n", 3031 printk(KERN_ERR "myri10ge: %s: device timeout, resetting\n",
2940 mgp->dev->name); 3032 mgp->dev->name);
3033 tx = &mgp->ss.tx;
2941 printk(KERN_INFO "myri10ge: %s: %d %d %d %d %d\n", 3034 printk(KERN_INFO "myri10ge: %s: %d %d %d %d %d\n",
2942 mgp->dev->name, mgp->tx.req, mgp->tx.done, 3035 mgp->dev->name, tx->req, tx->done,
2943 mgp->tx.pkt_start, mgp->tx.pkt_done, 3036 tx->pkt_start, tx->pkt_done,
2944 (int)ntohl(mgp->fw_stats->send_done_count)); 3037 (int)ntohl(mgp->ss.fw_stats->send_done_count));
2945 msleep(2000); 3038 msleep(2000);
2946 printk(KERN_INFO "myri10ge: %s: %d %d %d %d %d\n", 3039 printk(KERN_INFO "myri10ge: %s: %d %d %d %d %d\n",
2947 mgp->dev->name, mgp->tx.req, mgp->tx.done, 3040 mgp->dev->name, tx->req, tx->done,
2948 mgp->tx.pkt_start, mgp->tx.pkt_done, 3041 tx->pkt_start, tx->pkt_done,
2949 (int)ntohl(mgp->fw_stats->send_done_count)); 3042 (int)ntohl(mgp->ss.fw_stats->send_done_count));
2950 } 3043 }
2951 rtnl_lock(); 3044 rtnl_lock();
2952 myri10ge_close(mgp->dev); 3045 myri10ge_close(mgp->dev);
@@ -2969,28 +3062,31 @@ static void myri10ge_watchdog(struct work_struct *work)
2969static void myri10ge_watchdog_timer(unsigned long arg) 3062static void myri10ge_watchdog_timer(unsigned long arg)
2970{ 3063{
2971 struct myri10ge_priv *mgp; 3064 struct myri10ge_priv *mgp;
3065 struct myri10ge_slice_state *ss;
2972 u32 rx_pause_cnt; 3066 u32 rx_pause_cnt;
2973 3067
2974 mgp = (struct myri10ge_priv *)arg; 3068 mgp = (struct myri10ge_priv *)arg;
2975 3069
2976 if (mgp->rx_small.watchdog_needed) { 3070 rx_pause_cnt = ntohl(mgp->ss.fw_stats->dropped_pause);
2977 myri10ge_alloc_rx_pages(mgp, &mgp->rx_small, 3071
3072 ss = &mgp->ss;
3073 if (ss->rx_small.watchdog_needed) {
3074 myri10ge_alloc_rx_pages(mgp, &ss->rx_small,
2978 mgp->small_bytes + MXGEFW_PAD, 1); 3075 mgp->small_bytes + MXGEFW_PAD, 1);
2979 if (mgp->rx_small.fill_cnt - mgp->rx_small.cnt >= 3076 if (ss->rx_small.fill_cnt - ss->rx_small.cnt >=
2980 myri10ge_fill_thresh) 3077 myri10ge_fill_thresh)
2981 mgp->rx_small.watchdog_needed = 0; 3078 ss->rx_small.watchdog_needed = 0;
2982 } 3079 }
2983 if (mgp->rx_big.watchdog_needed) { 3080 if (ss->rx_big.watchdog_needed) {
2984 myri10ge_alloc_rx_pages(mgp, &mgp->rx_big, mgp->big_bytes, 1); 3081 myri10ge_alloc_rx_pages(mgp, &ss->rx_big, mgp->big_bytes, 1);
2985 if (mgp->rx_big.fill_cnt - mgp->rx_big.cnt >= 3082 if (ss->rx_big.fill_cnt - ss->rx_big.cnt >=
2986 myri10ge_fill_thresh) 3083 myri10ge_fill_thresh)
2987 mgp->rx_big.watchdog_needed = 0; 3084 ss->rx_big.watchdog_needed = 0;
2988 } 3085 }
2989 rx_pause_cnt = ntohl(mgp->fw_stats->dropped_pause);
2990 3086
2991 if (mgp->tx.req != mgp->tx.done && 3087 if (ss->tx.req != ss->tx.done &&
2992 mgp->tx.done == mgp->watchdog_tx_done && 3088 ss->tx.done == ss->watchdog_tx_done &&
2993 mgp->watchdog_tx_req != mgp->watchdog_tx_done) { 3089 ss->watchdog_tx_req != ss->watchdog_tx_done) {
2994 /* nic seems like it might be stuck.. */ 3090 /* nic seems like it might be stuck.. */
2995 if (rx_pause_cnt != mgp->watchdog_pause) { 3091 if (rx_pause_cnt != mgp->watchdog_pause) {
2996 if (net_ratelimit()) 3092 if (net_ratelimit())
@@ -3005,8 +3101,8 @@ static void myri10ge_watchdog_timer(unsigned long arg)
3005 /* rearm timer */ 3101 /* rearm timer */
3006 mod_timer(&mgp->watchdog_timer, 3102 mod_timer(&mgp->watchdog_timer,
3007 jiffies + myri10ge_watchdog_timeout * HZ); 3103 jiffies + myri10ge_watchdog_timeout * HZ);
3008 mgp->watchdog_tx_done = mgp->tx.done; 3104 ss->watchdog_tx_done = ss->tx.done;
3009 mgp->watchdog_tx_req = mgp->tx.req; 3105 ss->watchdog_tx_req = ss->tx.req;
3010 mgp->watchdog_pause = rx_pause_cnt; 3106 mgp->watchdog_pause = rx_pause_cnt;
3011} 3107}
3012 3108
@@ -3030,7 +3126,7 @@ static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
3030 3126
3031 mgp = netdev_priv(netdev); 3127 mgp = netdev_priv(netdev);
3032 mgp->dev = netdev; 3128 mgp->dev = netdev;
3033 netif_napi_add(netdev, &mgp->napi, myri10ge_poll, myri10ge_napi_weight); 3129 netif_napi_add(netdev, &mgp->ss.napi, myri10ge_poll, myri10ge_napi_weight);
3034 mgp->pdev = pdev; 3130 mgp->pdev = pdev;
3035 mgp->csum_flag = MXGEFW_FLAGS_CKSUM; 3131 mgp->csum_flag = MXGEFW_FLAGS_CKSUM;
3036 mgp->pause = myri10ge_flow_control; 3132 mgp->pause = myri10ge_flow_control;
@@ -3076,9 +3172,9 @@ static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
3076 if (mgp->cmd == NULL) 3172 if (mgp->cmd == NULL)
3077 goto abort_with_netdev; 3173 goto abort_with_netdev;
3078 3174
3079 mgp->fw_stats = dma_alloc_coherent(&pdev->dev, sizeof(*mgp->fw_stats), 3175 mgp->ss.fw_stats = dma_alloc_coherent(&pdev->dev, sizeof(*mgp->ss.fw_stats),
3080 &mgp->fw_stats_bus, GFP_KERNEL); 3176 &mgp->ss.fw_stats_bus, GFP_KERNEL);
3081 if (mgp->fw_stats == NULL) 3177 if (mgp->ss.fw_stats == NULL)
3082 goto abort_with_cmd; 3178 goto abort_with_cmd;
3083 3179
3084 mgp->board_span = pci_resource_len(pdev, 0); 3180 mgp->board_span = pci_resource_len(pdev, 0);
@@ -3118,12 +3214,12 @@ static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
3118 netdev->dev_addr[i] = mgp->mac_addr[i]; 3214 netdev->dev_addr[i] = mgp->mac_addr[i];
3119 3215
3120 /* allocate rx done ring */ 3216 /* allocate rx done ring */
3121 bytes = myri10ge_max_intr_slots * sizeof(*mgp->rx_done.entry); 3217 bytes = mgp->max_intr_slots * sizeof(*mgp->ss.rx_done.entry);
3122 mgp->rx_done.entry = dma_alloc_coherent(&pdev->dev, bytes, 3218 mgp->ss.rx_done.entry = dma_alloc_coherent(&pdev->dev, bytes,
3123 &mgp->rx_done.bus, GFP_KERNEL); 3219 &mgp->ss.rx_done.bus, GFP_KERNEL);
3124 if (mgp->rx_done.entry == NULL) 3220 if (mgp->ss.rx_done.entry == NULL)
3125 goto abort_with_ioremap; 3221 goto abort_with_ioremap;
3126 memset(mgp->rx_done.entry, 0, bytes); 3222 memset(mgp->ss.rx_done.entry, 0, bytes);
3127 3223
3128 myri10ge_select_firmware(mgp); 3224 myri10ge_select_firmware(mgp);
3129 3225
@@ -3183,7 +3279,7 @@ static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
3183 } 3279 }
3184 dev_info(dev, "%s IRQ %d, tx bndry %d, fw %s, WC %s\n", 3280 dev_info(dev, "%s IRQ %d, tx bndry %d, fw %s, WC %s\n",
3185 (mgp->msi_enabled ? "MSI" : "xPIC"), 3281 (mgp->msi_enabled ? "MSI" : "xPIC"),
3186 netdev->irq, mgp->tx.boundary, mgp->fw_name, 3282 netdev->irq, mgp->tx_boundary, mgp->fw_name,
3187 (mgp->wc_enabled ? "Enabled" : "Disabled")); 3283 (mgp->wc_enabled ? "Enabled" : "Disabled"));
3188 3284
3189 return 0; 3285 return 0;
@@ -3195,9 +3291,9 @@ abort_with_firmware:
3195 myri10ge_dummy_rdma(mgp, 0); 3291 myri10ge_dummy_rdma(mgp, 0);
3196 3292
3197abort_with_rx_done: 3293abort_with_rx_done:
3198 bytes = myri10ge_max_intr_slots * sizeof(*mgp->rx_done.entry); 3294 bytes = mgp->max_intr_slots * sizeof(*mgp->ss.rx_done.entry);
3199 dma_free_coherent(&pdev->dev, bytes, 3295 dma_free_coherent(&pdev->dev, bytes,
3200 mgp->rx_done.entry, mgp->rx_done.bus); 3296 mgp->ss.rx_done.entry, mgp->ss.rx_done.bus);
3201 3297
3202abort_with_ioremap: 3298abort_with_ioremap:
3203 iounmap(mgp->sram); 3299 iounmap(mgp->sram);
@@ -3207,8 +3303,8 @@ abort_with_wc:
3207 if (mgp->mtrr >= 0) 3303 if (mgp->mtrr >= 0)
3208 mtrr_del(mgp->mtrr, mgp->iomem_base, mgp->board_span); 3304 mtrr_del(mgp->mtrr, mgp->iomem_base, mgp->board_span);
3209#endif 3305#endif
3210 dma_free_coherent(&pdev->dev, sizeof(*mgp->fw_stats), 3306 dma_free_coherent(&pdev->dev, sizeof(*mgp->ss.fw_stats),
3211 mgp->fw_stats, mgp->fw_stats_bus); 3307 mgp->ss.fw_stats, mgp->ss.fw_stats_bus);
3212 3308
3213abort_with_cmd: 3309abort_with_cmd:
3214 dma_free_coherent(&pdev->dev, sizeof(*mgp->cmd), 3310 dma_free_coherent(&pdev->dev, sizeof(*mgp->cmd),
@@ -3246,9 +3342,9 @@ static void myri10ge_remove(struct pci_dev *pdev)
3246 /* avoid a memory leak */ 3342 /* avoid a memory leak */
3247 pci_restore_state(pdev); 3343 pci_restore_state(pdev);
3248 3344
3249 bytes = myri10ge_max_intr_slots * sizeof(*mgp->rx_done.entry); 3345 bytes = mgp->max_intr_slots * sizeof(*mgp->ss.rx_done.entry);
3250 dma_free_coherent(&pdev->dev, bytes, 3346 dma_free_coherent(&pdev->dev, bytes,
3251 mgp->rx_done.entry, mgp->rx_done.bus); 3347 mgp->ss.rx_done.entry, mgp->ss.rx_done.bus);
3252 3348
3253 iounmap(mgp->sram); 3349 iounmap(mgp->sram);
3254 3350
@@ -3256,8 +3352,8 @@ static void myri10ge_remove(struct pci_dev *pdev)
3256 if (mgp->mtrr >= 0) 3352 if (mgp->mtrr >= 0)
3257 mtrr_del(mgp->mtrr, mgp->iomem_base, mgp->board_span); 3353 mtrr_del(mgp->mtrr, mgp->iomem_base, mgp->board_span);
3258#endif 3354#endif
3259 dma_free_coherent(&pdev->dev, sizeof(*mgp->fw_stats), 3355 dma_free_coherent(&pdev->dev, sizeof(*mgp->ss.fw_stats),
3260 mgp->fw_stats, mgp->fw_stats_bus); 3356 mgp->ss.fw_stats, mgp->ss.fw_stats_bus);
3261 3357
3262 dma_free_coherent(&pdev->dev, sizeof(*mgp->cmd), 3358 dma_free_coherent(&pdev->dev, sizeof(*mgp->cmd),
3263 mgp->cmd, mgp->cmd_bus); 3359 mgp->cmd, mgp->cmd_bus);
diff --git a/drivers/net/myri10ge/myri10ge_mcp.h b/drivers/net/myri10ge/myri10ge_mcp.h
index 58e57178c563..fdbeeee07372 100644
--- a/drivers/net/myri10ge/myri10ge_mcp.h
+++ b/drivers/net/myri10ge/myri10ge_mcp.h
@@ -10,7 +10,7 @@ struct mcp_dma_addr {
10 __be32 low; 10 __be32 low;
11}; 11};
12 12
13/* 4 Bytes. 8 Bytes for NDIS drivers. */ 13/* 4 Bytes */
14struct mcp_slot { 14struct mcp_slot {
15 __sum16 checksum; 15 __sum16 checksum;
16 __be16 length; 16 __be16 length;
@@ -144,6 +144,7 @@ enum myri10ge_mcp_cmd_type {
144 * a power of 2 number of entries. */ 144 * a power of 2 number of entries. */
145 145
146 MXGEFW_CMD_SET_INTRQ_SIZE, /* in bytes */ 146 MXGEFW_CMD_SET_INTRQ_SIZE, /* in bytes */
147#define MXGEFW_CMD_SET_INTRQ_SIZE_FLAG_NO_STRICT_SIZE_CHECK (1 << 31)
147 148
148 /* command to bring ethernet interface up. Above parameters 149 /* command to bring ethernet interface up. Above parameters
149 * (plus mtu & mac address) must have been exchanged prior 150 * (plus mtu & mac address) must have been exchanged prior
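A minimal sketch of how a driver might use the new flag when its interrupt queue is not a power-of-2 size; whether the flag is OR'd into data0 alongside the byte count is an assumption here, and intrq_bytes is a hypothetical local:

	struct myri10ge_cmd cmd;
	int status;

	/* request an interrupt queue of intrq_bytes, asking the firmware
	 * to skip its strict size check (bit 31 of data0) */
	cmd.data0 = intrq_bytes | MXGEFW_CMD_SET_INTRQ_SIZE_FLAG_NO_STRICT_SIZE_CHECK;
	status = myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_INTRQ_SIZE, &cmd, 0);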
@@ -221,10 +222,14 @@ enum myri10ge_mcp_cmd_type {
221 MXGEFW_CMD_GET_MAX_RSS_QUEUES, 222 MXGEFW_CMD_GET_MAX_RSS_QUEUES,
222 MXGEFW_CMD_ENABLE_RSS_QUEUES, 223 MXGEFW_CMD_ENABLE_RSS_QUEUES,
223 /* data0 = number of slices n (0, 1, ..., n-1) to enable 224 /* data0 = number of slices n (0, 1, ..., n-1) to enable
224 * data1 = interrupt mode. 0=share one INTx/MSI, 1=use one MSI-X per queue. 225 * data1 = interrupt mode.
226 * 0=share one INTx/MSI, 1=use one MSI-X per queue.
225 * If all queues share one interrupt, the driver must have set 227 * If all queues share one interrupt, the driver must have set
226 * RSS_SHARED_INTERRUPT_DMA before enabling queues. 228 * RSS_SHARED_INTERRUPT_DMA before enabling queues.
227 */ 229 */
230#define MXGEFW_SLICE_INTR_MODE_SHARED 0
231#define MXGEFW_SLICE_INTR_MODE_ONE_PER_SLICE 1
232
228 MXGEFW_CMD_GET_RSS_SHARED_INTERRUPT_MASK_OFFSET, 233 MXGEFW_CMD_GET_RSS_SHARED_INTERRUPT_MASK_OFFSET,
229 MXGEFW_CMD_SET_RSS_SHARED_INTERRUPT_DMA, 234 MXGEFW_CMD_SET_RSS_SHARED_INTERRUPT_DMA,
230 /* data0, data1 = bus address lsw, msw */ 235 /* data0, data1 = bus address lsw, msw */
@@ -241,10 +246,14 @@ enum myri10ge_mcp_cmd_type {
241 * 0: disable rss. nic does not distribute receive packets. 246 * 0: disable rss. nic does not distribute receive packets.
242 * 1: enable rss. nic distributes receive packets among queues. 247 * 1: enable rss. nic distributes receive packets among queues.
243 * data1 = hash type 248 * data1 = hash type
244 * 1: IPV4 249 * 1: IPV4 (required by RSS)
245 * 2: TCP_IPV4 250 * 2: TCP_IPV4 (required by RSS)
246 * 3: IPV4 | TCP_IPV4 251 * 3: IPV4 | TCP_IPV4 (required by RSS)
252 * 4: source port
247 */ 253 */
254#define MXGEFW_RSS_HASH_TYPE_IPV4 0x1
255#define MXGEFW_RSS_HASH_TYPE_TCP_IPV4 0x2
256#define MXGEFW_RSS_HASH_TYPE_SRC_PORT 0x4
248 257
249 MXGEFW_CMD_GET_MAX_TSO6_HDR_SIZE, 258 MXGEFW_CMD_GET_MAX_TSO6_HDR_SIZE,
250 /* Return data = the max. size of the entire headers of a IPv6 TSO packet. 259 /* Return data = the max. size of the entire headers of a IPv6 TSO packet.
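A sketch of programming the hash type from the driver side, following the data0/data1 layout in the comment above; the enable command itself sits in an elided part of this enum, so the name MXGEFW_CMD_SET_RSS_ENABLE is an assumption:

	struct myri10ge_cmd cmd;
	int status;

	cmd.data0 = 1;				/* 1: enable rss */
	cmd.data1 = MXGEFW_RSS_HASH_TYPE_IPV4 |	/* 3: IPV4 | TCP_IPV4 */
		    MXGEFW_RSS_HASH_TYPE_TCP_IPV4;
	status = myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_RSS_ENABLE, &cmd, 0);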
@@ -260,6 +269,8 @@ enum myri10ge_mcp_cmd_type {
260 * 0: Linux/FreeBSD style (NIC default) 269 * 0: Linux/FreeBSD style (NIC default)
261 * 1: NDIS/NetBSD style 270 * 1: NDIS/NetBSD style
262 */ 271 */
272#define MXGEFW_TSO_MODE_LINUX 0
273#define MXGEFW_TSO_MODE_NDIS 1
263 274
264 MXGEFW_CMD_MDIO_READ, 275 MXGEFW_CMD_MDIO_READ,
265 /* data0 = dev_addr (PMA/PMD or PCS ...), data1 = register/addr */ 276 /* data0 = dev_addr (PMA/PMD or PCS ...), data1 = register/addr */
@@ -286,6 +297,38 @@ enum myri10ge_mcp_cmd_type {
286 /* Return data = NIC memory offset of mcp_vpump_public_global */ 297 /* Return data = NIC memory offset of mcp_vpump_public_global */
287 MXGEFW_CMD_RESET_VPUMP, 298 MXGEFW_CMD_RESET_VPUMP,
288 /* Resets the VPUMP state */ 299 /* Resets the VPUMP state */
300
301 MXGEFW_CMD_SET_RSS_MCP_SLOT_TYPE,
302 /* data0 = mcp_slot type to use.
303 * 0 = the default 4B mcp_slot
304 * 1 = 8B mcp_slot_8
305 */
306#define MXGEFW_RSS_MCP_SLOT_TYPE_MIN 0
307#define MXGEFW_RSS_MCP_SLOT_TYPE_WITH_HASH 1
308
309 MXGEFW_CMD_SET_THROTTLE_FACTOR,
310 /* set the throttle factor for ethp_z8e
311 * data0 = throttle_factor
312 * throttle_factor = 256 * pcie-raw-speed / tx_speed
313 * tx_speed = 256 * pcie-raw-speed / throttle_factor
314 *
315 * For PCI-E x8: pcie-raw-speed == 16Gb/s
316 * For PCI-E x4: pcie-raw-speed == 8Gb/s
317 *
318 * ex1: throttle_factor == 0x1a0 (416), tx_speed == 1.23GB/s == 9.846 Gb/s
319 * ex2: throttle_factor == 0x200 (512), tx_speed == 1.0GB/s == 8 Gb/s
320 *
321 * with tx_boundary == 2048, max-throttle-factor == 8191 => min-speed == 500Mb/s
322 * with tx_boundary == 4096, max-throttle-factor == 4095 => min-speed == 1Gb/s
323 */
324
325 MXGEFW_CMD_VPUMP_UP,
326 /* Allocates VPump Connection, Send Request and Zero copy buffer address tables */
327 MXGEFW_CMD_GET_VPUMP_CLK,
328 /* Get the lanai clock */
329
330 MXGEFW_CMD_GET_DCA_OFFSET,
331 /* offset of dca control for WDMAs */
289}; 332};
290 333
291enum myri10ge_mcp_cmd_status { 334enum myri10ge_mcp_cmd_status {
@@ -302,7 +345,8 @@ enum myri10ge_mcp_cmd_status {
302 MXGEFW_CMD_ERROR_UNALIGNED, 345 MXGEFW_CMD_ERROR_UNALIGNED,
303 MXGEFW_CMD_ERROR_NO_MDIO, 346 MXGEFW_CMD_ERROR_NO_MDIO,
304 MXGEFW_CMD_ERROR_XFP_FAILURE, 347 MXGEFW_CMD_ERROR_XFP_FAILURE,
305 MXGEFW_CMD_ERROR_XFP_ABSENT 348 MXGEFW_CMD_ERROR_XFP_ABSENT,
349 MXGEFW_CMD_ERROR_BAD_PCIE_LINK
306}; 350};
307 351
308#define MXGEFW_OLD_IRQ_DATA_LEN 40 352#define MXGEFW_OLD_IRQ_DATA_LEN 40
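The throttle-factor comment in the hunk above pins the arithmetic down exactly (throttle_factor = 256 * pcie-raw-speed / tx_speed), so a standalone check is easy to write. The sketch below is illustrative only; the helper names are hypothetical and nothing here is part of the myri10ge driver.

/* Illustrative only: converts between tx_speed and throttle_factor
 * per the MXGEFW_CMD_SET_THROTTLE_FACTOR comment above. */
#include <stdio.h>

#define PCIE_X8_RAW_GBPS 16u	/* per the comment: PCI-E x8 */
#define PCIE_X4_RAW_GBPS 8u	/* per the comment: PCI-E x4 */

/* throttle_factor = 256 * pcie-raw-speed / tx_speed */
static unsigned throttle_factor(unsigned raw_gbps, double tx_gbps)
{
	return (unsigned)(256.0 * raw_gbps / tx_gbps);
}

/* tx_speed = 256 * pcie-raw-speed / throttle_factor */
static double tx_speed_gbps(unsigned raw_gbps, unsigned factor)
{
	return 256.0 * raw_gbps / factor;
}

int main(void)
{
	/* Reproduces ex1/ex2 from the comment: 9.846 Gb/s and 8 Gb/s */
	printf("0x1a0 -> %.3f Gb/s\n", tx_speed_gbps(PCIE_X8_RAW_GBPS, 0x1a0));
	printf("0x200 -> %.3f Gb/s\n", tx_speed_gbps(PCIE_X8_RAW_GBPS, 0x200));
	printf("10 Gb/s -> factor %u\n", throttle_factor(PCIE_X8_RAW_GBPS, 10.0));
	return 0;
}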
diff --git a/drivers/net/myri10ge/myri10ge_mcp_gen_header.h b/drivers/net/myri10ge/myri10ge_mcp_gen_header.h
index 16a810dd6d51..07d65c2cbb24 100644
--- a/drivers/net/myri10ge/myri10ge_mcp_gen_header.h
+++ b/drivers/net/myri10ge/myri10ge_mcp_gen_header.h
@@ -1,30 +1,6 @@
1#ifndef __MYRI10GE_MCP_GEN_HEADER_H__ 1#ifndef __MYRI10GE_MCP_GEN_HEADER_H__
2#define __MYRI10GE_MCP_GEN_HEADER_H__ 2#define __MYRI10GE_MCP_GEN_HEADER_H__
3 3
 4/* this file defines a standard header used as a first entry point to
 5 * exchange information between the firmware and the driver. The
 6 * header structure can be anywhere in the mcp. It will usually be in
 7 * the .data section, because some fields need to be initialized at
 8 * compile time.
 9 * The 32bit word at offset MCP_HEADER_PTR_OFFSET in the mcp must
 10 * contain the location of the header.
11 *
12 * Typically a MCP will start with the following:
13 * .text
14 * .space 52 ! to help catch MEMORY_INT errors
15 * bt start ! jump to real code
16 * nop
17 * .long _gen_mcp_header
18 *
19 * The source will have a definition like:
20 *
21 * mcp_gen_header_t gen_mcp_header = {
22 * .header_length = sizeof(mcp_gen_header_t),
23 * .mcp_type = MCP_TYPE_XXX,
24 * .version = "something $Id: mcp_gen_header.h,v 1.2 2006/05/13 10:04:35 bgoglin Exp $",
25 * .mcp_globals = (unsigned)&Globals
26 * };
27 */
28 4
29#define MCP_HEADER_PTR_OFFSET 0x3c 5#define MCP_HEADER_PTR_OFFSET 0x3c
30 6
@@ -32,13 +8,14 @@
32#define MCP_TYPE_PCIE 0x70636965 /* "PCIE" pcie-only MCP */ 8#define MCP_TYPE_PCIE 0x70636965 /* "PCIE" pcie-only MCP */
33#define MCP_TYPE_ETH 0x45544820 /* "ETH " */ 9#define MCP_TYPE_ETH 0x45544820 /* "ETH " */
34#define MCP_TYPE_MCP0 0x4d435030 /* "MCP0" */ 10#define MCP_TYPE_MCP0 0x4d435030 /* "MCP0" */
11#define MCP_TYPE_DFLT 0x20202020 /* " " */
35 12
36struct mcp_gen_header { 13struct mcp_gen_header {
37 /* the first 4 fields are filled at compile time */ 14 /* the first 4 fields are filled at compile time */
38 unsigned header_length; 15 unsigned header_length;
39 __be32 mcp_type; 16 __be32 mcp_type;
40 char version[128]; 17 char version[128];
41 unsigned mcp_globals; /* pointer to mcp-type specific structure */ 18 unsigned mcp_private; /* pointer to mcp-type specific structure */
42 19
43 /* filled by the MCP at run-time */ 20 /* filled by the MCP at run-time */
44 unsigned sram_size; 21 unsigned sram_size;
@@ -53,6 +30,18 @@ struct mcp_gen_header {
53 * 30 *
 54 * Never remove any field. Keep everything naturally aligned. 31 *
55 */ 32 */
33
34 /* Specifies if the running mcp is mcp0, 1, or 2. */
35 unsigned char mcp_index;
36 unsigned char disable_rabbit;
37 unsigned char unaligned_tlp;
38 unsigned char pad1;
39 unsigned counters_addr;
40 unsigned copy_block_info; /* for small mcps loaded with "lload -d" */
41 unsigned short handoff_id_major; /* must be equal */
42 unsigned short handoff_id_caps; /* bitfield: new mcp must have superset */
43 unsigned msix_table_addr; /* start address of msix table in firmware */
44 /* 8 */
56}; 45};
57 46
58#endif /* __MYRI10GE_MCP_GEN_HEADER_H__ */ 47#endif /* __MYRI10GE_MCP_GEN_HEADER_H__ */
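The comment removed above documented the contract that still governs this header: the 32-bit word at MCP_HEADER_PTR_OFFSET (0x3c) holds the image offset of struct mcp_gen_header, whose second field is mcp_type. A minimal sketch of validating a firmware image against that contract follows, assuming the big-endian field order the firmware uses; check_mcp_image() is a hypothetical name, not a driver function.

/* Minimal sketch: locate and sanity-check the MCP header in a
 * firmware image, following the MCP_HEADER_PTR_OFFSET contract. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <arpa/inet.h>	/* ntohl */

#define MCP_HEADER_PTR_OFFSET 0x3c
#define MCP_TYPE_ETH 0x45544820	/* "ETH " */

int check_mcp_image(const uint8_t *image, size_t len)
{
	uint32_t hdr_off, mcp_type;

	if (len < MCP_HEADER_PTR_OFFSET + 4)
		return -1;
	memcpy(&hdr_off, image + MCP_HEADER_PTR_OFFSET, 4);
	hdr_off = ntohl(hdr_off);
	/* header_length (4 bytes) + mcp_type (4 bytes) must fit */
	if (hdr_off + 8 > len)
		return -1;
	memcpy(&mcp_type, image + hdr_off + 4, 4);	/* second field */
	mcp_type = ntohl(mcp_type);
	if (mcp_type != MCP_TYPE_ETH) {
		fprintf(stderr, "unexpected mcp_type 0x%08x\n", mcp_type);
		return -1;
	}
	return 0;
}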
diff --git a/drivers/net/niu.c b/drivers/net/niu.c
index 4009c4ce96b4..918f802fe089 100644
--- a/drivers/net/niu.c
+++ b/drivers/net/niu.c
@@ -1,6 +1,6 @@
1/* niu.c: Neptune ethernet driver. 1/* niu.c: Neptune ethernet driver.
2 * 2 *
3 * Copyright (C) 2007 David S. Miller (davem@davemloft.net) 3 * Copyright (C) 2007, 2008 David S. Miller (davem@davemloft.net)
4 */ 4 */
5 5
6#include <linux/module.h> 6#include <linux/module.h>
@@ -33,8 +33,8 @@
33 33
34#define DRV_MODULE_NAME "niu" 34#define DRV_MODULE_NAME "niu"
35#define PFX DRV_MODULE_NAME ": " 35#define PFX DRV_MODULE_NAME ": "
36#define DRV_MODULE_VERSION "0.8" 36#define DRV_MODULE_VERSION "0.9"
37#define DRV_MODULE_RELDATE "April 24, 2008" 37#define DRV_MODULE_RELDATE "May 4, 2008"
38 38
39static char version[] __devinitdata = 39static char version[] __devinitdata =
40 DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n"; 40 DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
@@ -865,7 +865,6 @@ static int link_status_1g_serdes(struct niu *np, int *link_up_p)
865 return 0; 865 return 0;
866} 866}
867 867
868
869static int link_status_10g_serdes(struct niu *np, int *link_up_p) 868static int link_status_10g_serdes(struct niu *np, int *link_up_p)
870{ 869{
871 unsigned long flags; 870 unsigned long flags;
@@ -900,7 +899,6 @@ static int link_status_10g_serdes(struct niu *np, int *link_up_p)
900 return 0; 899 return 0;
901} 900}
902 901
903
904static int link_status_1g_rgmii(struct niu *np, int *link_up_p) 902static int link_status_1g_rgmii(struct niu *np, int *link_up_p)
905{ 903{
906 struct niu_link_config *lp = &np->link_config; 904 struct niu_link_config *lp = &np->link_config;
@@ -957,7 +955,6 @@ out:
957 return err; 955 return err;
958} 956}
959 957
960
961static int bcm8704_reset(struct niu *np) 958static int bcm8704_reset(struct niu *np)
962{ 959{
963 int err, limit; 960 int err, limit;
@@ -1357,8 +1354,6 @@ static int mii_reset(struct niu *np)
1357 return 0; 1354 return 0;
1358} 1355}
1359 1356
1360
1361
1362static int xcvr_init_1g_rgmii(struct niu *np) 1357static int xcvr_init_1g_rgmii(struct niu *np)
1363{ 1358{
1364 int err; 1359 int err;
@@ -1419,7 +1414,6 @@ static int xcvr_init_1g_rgmii(struct niu *np)
1419 return 0; 1414 return 0;
1420} 1415}
1421 1416
1422
1423static int mii_init_common(struct niu *np) 1417static int mii_init_common(struct niu *np)
1424{ 1418{
1425 struct niu_link_config *lp = &np->link_config; 1419 struct niu_link_config *lp = &np->link_config;
@@ -7008,31 +7002,20 @@ static int __devinit niu_phy_type_prop_decode(struct niu *np,
7008 return 0; 7002 return 0;
7009} 7003}
7010 7004
7011/* niu board models have a trailing dash version incremented
 7012 * with HW rev change. Need to ignore the dash version while
 7013 * checking for a match.
 7014 *
 7015 * for example, for the 10G card the current vpd.board_model
 7016 * is 501-5283-04, of which -04 is the dash version and has
 7017 * to be ignored
7018 */
7019static int niu_board_model_match(struct niu *np, const char *model)
7020{
7021 return !strncmp(np->vpd.board_model, model, strlen(model));
7022}
7023
7024static int niu_pci_vpd_get_nports(struct niu *np) 7005static int niu_pci_vpd_get_nports(struct niu *np)
7025{ 7006{
7026 int ports = 0; 7007 int ports = 0;
7027 7008
7028 if ((niu_board_model_match(np, NIU_QGC_LP_BM_STR)) || 7009 if ((!strcmp(np->vpd.model, NIU_QGC_LP_MDL_STR)) ||
7029 (niu_board_model_match(np, NIU_QGC_PEM_BM_STR)) || 7010 (!strcmp(np->vpd.model, NIU_QGC_PEM_MDL_STR)) ||
7030 (niu_board_model_match(np, NIU_ALONSO_BM_STR))) { 7011 (!strcmp(np->vpd.model, NIU_MARAMBA_MDL_STR)) ||
7012 (!strcmp(np->vpd.model, NIU_KIMI_MDL_STR)) ||
7013 (!strcmp(np->vpd.model, NIU_ALONSO_MDL_STR))) {
7031 ports = 4; 7014 ports = 4;
7032 } else if ((niu_board_model_match(np, NIU_2XGF_LP_BM_STR)) || 7015 } else if ((!strcmp(np->vpd.model, NIU_2XGF_LP_MDL_STR)) ||
7033 (niu_board_model_match(np, NIU_2XGF_PEM_BM_STR)) || 7016 (!strcmp(np->vpd.model, NIU_2XGF_PEM_MDL_STR)) ||
7034 (niu_board_model_match(np, NIU_FOXXY_BM_STR)) || 7017 (!strcmp(np->vpd.model, NIU_FOXXY_MDL_STR)) ||
7035 (niu_board_model_match(np, NIU_2XGF_MRVL_BM_STR))) { 7018 (!strcmp(np->vpd.model, NIU_2XGF_MRVL_MDL_STR))) {
7036 ports = 2; 7019 ports = 2;
7037 } 7020 }
7038 7021
@@ -7053,8 +7036,8 @@ static void __devinit niu_pci_vpd_validate(struct niu *np)
7053 return; 7036 return;
7054 } 7037 }
7055 7038
7056 if (!strcmp(np->vpd.model, "SUNW,CP3220") || 7039 if (!strcmp(np->vpd.model, NIU_ALONSO_MDL_STR) ||
7057 !strcmp(np->vpd.model, "SUNW,CP3260")) { 7040 !strcmp(np->vpd.model, NIU_KIMI_MDL_STR)) {
7058 np->flags |= NIU_FLAGS_10G; 7041 np->flags |= NIU_FLAGS_10G;
7059 np->flags &= ~NIU_FLAGS_FIBER; 7042 np->flags &= ~NIU_FLAGS_FIBER;
7060 np->flags |= NIU_FLAGS_XCVR_SERDES; 7043 np->flags |= NIU_FLAGS_XCVR_SERDES;
@@ -7065,7 +7048,7 @@ static void __devinit niu_pci_vpd_validate(struct niu *np)
7065 } 7048 }
7066 if (np->flags & NIU_FLAGS_10G) 7049 if (np->flags & NIU_FLAGS_10G)
7067 np->mac_xcvr = MAC_XCVR_XPCS; 7050 np->mac_xcvr = MAC_XCVR_XPCS;
7068 } else if (niu_board_model_match(np, NIU_FOXXY_BM_STR)) { 7051 } else if (!strcmp(np->vpd.model, NIU_FOXXY_MDL_STR)) {
7069 np->flags |= (NIU_FLAGS_10G | NIU_FLAGS_FIBER | 7052 np->flags |= (NIU_FLAGS_10G | NIU_FLAGS_FIBER |
7070 NIU_FLAGS_HOTPLUG_PHY); 7053 NIU_FLAGS_HOTPLUG_PHY);
7071 } else if (niu_phy_type_prop_decode(np, np->vpd.phy_type)) { 7054 } else if (niu_phy_type_prop_decode(np, np->vpd.phy_type)) {
@@ -7264,8 +7247,11 @@ static int __devinit niu_get_and_validate_port(struct niu *np)
7264 parent->num_ports = nr64(ESPC_NUM_PORTS_MACS) & 7247 parent->num_ports = nr64(ESPC_NUM_PORTS_MACS) &
7265 ESPC_NUM_PORTS_MACS_VAL; 7248 ESPC_NUM_PORTS_MACS_VAL;
7266 7249
7250 /* All of the current probing methods fail on
7251 * Maramba on-board parts.
7252 */
7267 if (!parent->num_ports) 7253 if (!parent->num_ports)
7268 return -ENODEV; 7254 parent->num_ports = 4;
7269 } 7255 }
7270 } 7256 }
7271 } 7257 }
@@ -7538,8 +7524,8 @@ static int __devinit walk_phys(struct niu *np, struct niu_parent *parent)
7538 u32 val; 7524 u32 val;
7539 int err; 7525 int err;
7540 7526
7541 if (!strcmp(np->vpd.model, "SUNW,CP3220") || 7527 if (!strcmp(np->vpd.model, NIU_ALONSO_MDL_STR) ||
7542 !strcmp(np->vpd.model, "SUNW,CP3260")) { 7528 !strcmp(np->vpd.model, NIU_KIMI_MDL_STR)) {
7543 num_10g = 0; 7529 num_10g = 0;
7544 num_1g = 2; 7530 num_1g = 2;
7545 parent->plat_type = PLAT_TYPE_ATCA_CP3220; 7531 parent->plat_type = PLAT_TYPE_ATCA_CP3220;
@@ -7548,7 +7534,7 @@ static int __devinit walk_phys(struct niu *np, struct niu_parent *parent)
7548 phy_encode(PORT_TYPE_1G, 1) | 7534 phy_encode(PORT_TYPE_1G, 1) |
7549 phy_encode(PORT_TYPE_1G, 2) | 7535 phy_encode(PORT_TYPE_1G, 2) |
7550 phy_encode(PORT_TYPE_1G, 3)); 7536 phy_encode(PORT_TYPE_1G, 3));
7551 } else if (niu_board_model_match(np, NIU_FOXXY_BM_STR)) { 7537 } else if (!strcmp(np->vpd.model, NIU_FOXXY_MDL_STR)) {
7552 num_10g = 2; 7538 num_10g = 2;
7553 num_1g = 0; 7539 num_1g = 0;
7554 parent->num_ports = 2; 7540 parent->num_ports = 2;
@@ -7943,6 +7929,7 @@ static int __devinit niu_get_of_props(struct niu *np)
7943 struct device_node *dp; 7929 struct device_node *dp;
7944 const char *phy_type; 7930 const char *phy_type;
7945 const u8 *mac_addr; 7931 const u8 *mac_addr;
7932 const char *model;
7946 int prop_len; 7933 int prop_len;
7947 7934
7948 if (np->parent->plat_type == PLAT_TYPE_NIU) 7935 if (np->parent->plat_type == PLAT_TYPE_NIU)
@@ -7997,6 +7984,11 @@ static int __devinit niu_get_of_props(struct niu *np)
7997 7984
7998 memcpy(dev->dev_addr, dev->perm_addr, dev->addr_len); 7985 memcpy(dev->dev_addr, dev->perm_addr, dev->addr_len);
7999 7986
7987 model = of_get_property(dp, "model", &prop_len);
7988
7989 if (model)
7990 strcpy(np->vpd.model, model);
7991
8000 return 0; 7992 return 0;
8001#else 7993#else
8002 return -EINVAL; 7994 return -EINVAL;
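The matching change above is semantic, not cosmetic: the deleted niu_board_model_match() used strncmp() so that a trailing dash revision in vpd.board_model (e.g. "501-5283-04") still matched the bare part number, while the new code compares the OpenFirmware-style vpd.model strings exactly with strcmp(), since strings like "SUNW,pcie-qgc" carry no dash version. A standalone sketch of the two styles; the strings are examples from this patch and the function restates the removed helper's logic.

/* Illustrative comparison of the two matching styles. */
#include <stdio.h>
#include <string.h>

/* Old style: prefix match, tolerant of "-04" dash revisions. */
static int board_model_match(const char *board_model, const char *model)
{
	return !strncmp(board_model, model, strlen(model));
}

int main(void)
{
	/* vpd.board_model carries a dash version... */
	printf("%d\n", board_model_match("501-5283-04", "501-5283"));	/* 1 */
	/* ...while the OF "model" property is exact, so strcmp suffices */
	printf("%d\n", !strcmp("SUNW,pcie-qgc", "SUNW,pcie-qgc"));	/* 1 */
	printf("%d\n", !strcmp("SUNW,pcie-qgc-pem", "SUNW,pcie-qgc"));	/* 0 */
	return 0;
}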
diff --git a/drivers/net/niu.h b/drivers/net/niu.h
index 101a3f1a8ec8..c6fa883daa22 100644
--- a/drivers/net/niu.h
+++ b/drivers/net/niu.h
@@ -2946,6 +2946,15 @@ struct rx_ring_info {
2946#define NIU_ALONSO_BM_STR "373-0202" 2946#define NIU_ALONSO_BM_STR "373-0202"
2947#define NIU_FOXXY_BM_STR "501-7961" 2947#define NIU_FOXXY_BM_STR "501-7961"
2948#define NIU_2XGF_MRVL_BM_STR "SK-6E82" 2948#define NIU_2XGF_MRVL_BM_STR "SK-6E82"
2949#define NIU_QGC_LP_MDL_STR "SUNW,pcie-qgc"
2950#define NIU_2XGF_LP_MDL_STR "SUNW,pcie-2xgf"
2951#define NIU_QGC_PEM_MDL_STR "SUNW,pcie-qgc-pem"
2952#define NIU_2XGF_PEM_MDL_STR "SUNW,pcie-2xgf-pem"
2953#define NIU_ALONSO_MDL_STR "SUNW,CP3220"
2954#define NIU_KIMI_MDL_STR "SUNW,CP3260"
2955#define NIU_MARAMBA_MDL_STR "SUNW,pcie-neptune"
2956#define NIU_FOXXY_MDL_STR "SUNW,pcie-rfem"
2957#define NIU_2XGF_MRVL_MDL_STR "SysKonnect,pcie-2xgf"
2949 2958
2950#define NIU_VPD_MIN_MAJOR 3 2959#define NIU_VPD_MIN_MAJOR 3
2951#define NIU_VPD_MIN_MINOR 4 2960#define NIU_VPD_MIN_MINOR 4
diff --git a/drivers/net/pcnet32.c b/drivers/net/pcnet32.c
index d0d5585114b0..81fd85214b98 100644
--- a/drivers/net/pcnet32.c
+++ b/drivers/net/pcnet32.c
@@ -22,12 +22,8 @@
22 *************************************************************************/ 22 *************************************************************************/
23 23
24#define DRV_NAME "pcnet32" 24#define DRV_NAME "pcnet32"
25#ifdef CONFIG_PCNET32_NAPI 25#define DRV_VERSION "1.35"
26#define DRV_VERSION "1.34-NAPI" 26#define DRV_RELDATE "21.Apr.2008"
27#else
28#define DRV_VERSION "1.34"
29#endif
30#define DRV_RELDATE "14.Aug.2007"
31#define PFX DRV_NAME ": " 27#define PFX DRV_NAME ": "
32 28
33static const char *const version = 29static const char *const version =
@@ -445,30 +441,24 @@ static struct pcnet32_access pcnet32_dwio = {
445 441
446static void pcnet32_netif_stop(struct net_device *dev) 442static void pcnet32_netif_stop(struct net_device *dev)
447{ 443{
448#ifdef CONFIG_PCNET32_NAPI
449 struct pcnet32_private *lp = netdev_priv(dev); 444 struct pcnet32_private *lp = netdev_priv(dev);
450#endif 445
451 dev->trans_start = jiffies; 446 dev->trans_start = jiffies;
452#ifdef CONFIG_PCNET32_NAPI
453 napi_disable(&lp->napi); 447 napi_disable(&lp->napi);
454#endif
455 netif_tx_disable(dev); 448 netif_tx_disable(dev);
456} 449}
457 450
458static void pcnet32_netif_start(struct net_device *dev) 451static void pcnet32_netif_start(struct net_device *dev)
459{ 452{
460#ifdef CONFIG_PCNET32_NAPI
461 struct pcnet32_private *lp = netdev_priv(dev); 453 struct pcnet32_private *lp = netdev_priv(dev);
462 ulong ioaddr = dev->base_addr; 454 ulong ioaddr = dev->base_addr;
463 u16 val; 455 u16 val;
464#endif 456
465 netif_wake_queue(dev); 457 netif_wake_queue(dev);
466#ifdef CONFIG_PCNET32_NAPI
467 val = lp->a.read_csr(ioaddr, CSR3); 458 val = lp->a.read_csr(ioaddr, CSR3);
468 val &= 0x00ff; 459 val &= 0x00ff;
469 lp->a.write_csr(ioaddr, CSR3, val); 460 lp->a.write_csr(ioaddr, CSR3, val);
470 napi_enable(&lp->napi); 461 napi_enable(&lp->napi);
471#endif
472} 462}
473 463
474/* 464/*
@@ -911,11 +901,7 @@ static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
911 rc = 1; /* default to fail */ 901 rc = 1; /* default to fail */
912 902
913 if (netif_running(dev)) 903 if (netif_running(dev))
914#ifdef CONFIG_PCNET32_NAPI
915 pcnet32_netif_stop(dev); 904 pcnet32_netif_stop(dev);
916#else
917 pcnet32_close(dev);
918#endif
919 905
920 spin_lock_irqsave(&lp->lock, flags); 906 spin_lock_irqsave(&lp->lock, flags);
921 lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */ 907 lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
@@ -1046,7 +1032,6 @@ static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
1046 x = a->read_bcr(ioaddr, 32); /* reset internal loopback */ 1032 x = a->read_bcr(ioaddr, 32); /* reset internal loopback */
1047 a->write_bcr(ioaddr, 32, (x & ~0x0002)); 1033 a->write_bcr(ioaddr, 32, (x & ~0x0002));
1048 1034
1049#ifdef CONFIG_PCNET32_NAPI
1050 if (netif_running(dev)) { 1035 if (netif_running(dev)) {
1051 pcnet32_netif_start(dev); 1036 pcnet32_netif_start(dev);
1052 pcnet32_restart(dev, CSR0_NORMAL); 1037 pcnet32_restart(dev, CSR0_NORMAL);
@@ -1055,16 +1040,6 @@ static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
1055 lp->a.write_bcr(ioaddr, 20, 4); /* return to 16bit mode */ 1040 lp->a.write_bcr(ioaddr, 20, 4); /* return to 16bit mode */
1056 } 1041 }
1057 spin_unlock_irqrestore(&lp->lock, flags); 1042 spin_unlock_irqrestore(&lp->lock, flags);
1058#else
1059 if (netif_running(dev)) {
1060 spin_unlock_irqrestore(&lp->lock, flags);
1061 pcnet32_open(dev);
1062 } else {
1063 pcnet32_purge_rx_ring(dev);
1064 lp->a.write_bcr(ioaddr, 20, 4); /* return to 16bit mode */
1065 spin_unlock_irqrestore(&lp->lock, flags);
1066 }
1067#endif
1068 1043
1069 return (rc); 1044 return (rc);
1070} /* end pcnet32_loopback_test */ 1045} /* end pcnet32_loopback_test */
@@ -1270,11 +1245,7 @@ static void pcnet32_rx_entry(struct net_device *dev,
1270 } 1245 }
1271 dev->stats.rx_bytes += skb->len; 1246 dev->stats.rx_bytes += skb->len;
1272 skb->protocol = eth_type_trans(skb, dev); 1247 skb->protocol = eth_type_trans(skb, dev);
1273#ifdef CONFIG_PCNET32_NAPI
1274 netif_receive_skb(skb); 1248 netif_receive_skb(skb);
1275#else
1276 netif_rx(skb);
1277#endif
1278 dev->last_rx = jiffies; 1249 dev->last_rx = jiffies;
1279 dev->stats.rx_packets++; 1250 dev->stats.rx_packets++;
1280 return; 1251 return;
@@ -1403,7 +1374,6 @@ static int pcnet32_tx(struct net_device *dev)
1403 return must_restart; 1374 return must_restart;
1404} 1375}
1405 1376
1406#ifdef CONFIG_PCNET32_NAPI
1407static int pcnet32_poll(struct napi_struct *napi, int budget) 1377static int pcnet32_poll(struct napi_struct *napi, int budget)
1408{ 1378{
1409 struct pcnet32_private *lp = container_of(napi, struct pcnet32_private, napi); 1379 struct pcnet32_private *lp = container_of(napi, struct pcnet32_private, napi);
@@ -1442,7 +1412,6 @@ static int pcnet32_poll(struct napi_struct *napi, int budget)
1442 } 1412 }
1443 return work_done; 1413 return work_done;
1444} 1414}
1445#endif
1446 1415
1447#define PCNET32_REGS_PER_PHY 32 1416#define PCNET32_REGS_PER_PHY 32
1448#define PCNET32_MAX_PHYS 32 1417#define PCNET32_MAX_PHYS 32
@@ -1864,9 +1833,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
1864 /* napi.weight is used in both the napi and non-napi cases */ 1833 /* napi.weight is used in both the napi and non-napi cases */
1865 lp->napi.weight = lp->rx_ring_size / 2; 1834 lp->napi.weight = lp->rx_ring_size / 2;
1866 1835
1867#ifdef CONFIG_PCNET32_NAPI
1868 netif_napi_add(dev, &lp->napi, pcnet32_poll, lp->rx_ring_size / 2); 1836 netif_napi_add(dev, &lp->napi, pcnet32_poll, lp->rx_ring_size / 2);
1869#endif
1870 1837
1871 if (fdx && !(lp->options & PCNET32_PORT_ASEL) && 1838 if (fdx && !(lp->options & PCNET32_PORT_ASEL) &&
1872 ((cards_found >= MAX_UNITS) || full_duplex[cards_found])) 1839 ((cards_found >= MAX_UNITS) || full_duplex[cards_found]))
@@ -2297,9 +2264,7 @@ static int pcnet32_open(struct net_device *dev)
2297 goto err_free_ring; 2264 goto err_free_ring;
2298 } 2265 }
2299 2266
2300#ifdef CONFIG_PCNET32_NAPI
2301 napi_enable(&lp->napi); 2267 napi_enable(&lp->napi);
2302#endif
2303 2268
2304 /* Re-initialize the PCNET32, and start it when done. */ 2269 /* Re-initialize the PCNET32, and start it when done. */
2305 lp->a.write_csr(ioaddr, 1, (lp->init_dma_addr & 0xffff)); 2270 lp->a.write_csr(ioaddr, 1, (lp->init_dma_addr & 0xffff));
@@ -2623,7 +2588,6 @@ pcnet32_interrupt(int irq, void *dev_id)
2623 dev->name, csr0); 2588 dev->name, csr0);
2624 /* unlike for the lance, there is no restart needed */ 2589 /* unlike for the lance, there is no restart needed */
2625 } 2590 }
2626#ifdef CONFIG_PCNET32_NAPI
2627 if (netif_rx_schedule_prep(dev, &lp->napi)) { 2591 if (netif_rx_schedule_prep(dev, &lp->napi)) {
2628 u16 val; 2592 u16 val;
2629 /* set interrupt masks */ 2593 /* set interrupt masks */
@@ -2634,24 +2598,9 @@ pcnet32_interrupt(int irq, void *dev_id)
2634 __netif_rx_schedule(dev, &lp->napi); 2598 __netif_rx_schedule(dev, &lp->napi);
2635 break; 2599 break;
2636 } 2600 }
2637#else
2638 pcnet32_rx(dev, lp->napi.weight);
2639 if (pcnet32_tx(dev)) {
2640 /* reset the chip to clear the error condition, then restart */
2641 lp->a.reset(ioaddr);
2642 lp->a.write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
2643 pcnet32_restart(dev, CSR0_START);
2644 netif_wake_queue(dev);
2645 }
2646#endif
2647 csr0 = lp->a.read_csr(ioaddr, CSR0); 2601 csr0 = lp->a.read_csr(ioaddr, CSR0);
2648 } 2602 }
2649 2603
2650#ifndef CONFIG_PCNET32_NAPI
2651 /* Set interrupt enable. */
2652 lp->a.write_csr(ioaddr, CSR0, CSR0_INTEN);
2653#endif
2654
2655 if (netif_msg_intr(lp)) 2604 if (netif_msg_intr(lp))
2656 printk(KERN_DEBUG "%s: exiting interrupt, csr0=%#4.4x.\n", 2605 printk(KERN_DEBUG "%s: exiting interrupt, csr0=%#4.4x.\n",
2657 dev->name, lp->a.read_csr(ioaddr, CSR0)); 2606 dev->name, lp->a.read_csr(ioaddr, CSR0));
@@ -2670,9 +2619,7 @@ static int pcnet32_close(struct net_device *dev)
2670 del_timer_sync(&lp->watchdog_timer); 2619 del_timer_sync(&lp->watchdog_timer);
2671 2620
2672 netif_stop_queue(dev); 2621 netif_stop_queue(dev);
2673#ifdef CONFIG_PCNET32_NAPI
2674 napi_disable(&lp->napi); 2622 napi_disable(&lp->napi);
2675#endif
2676 2623
2677 spin_lock_irqsave(&lp->lock, flags); 2624 spin_lock_irqsave(&lp->lock, flags);
2678 2625
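With the CONFIG_PCNET32_NAPI conditionals removed, pcnet32 always uses the one NAPI shape visible in the hunks above: schedule from the interrupt handler, do bounded RX work in the poll callback, complete when under budget. The fragment below restates that 2.6.25-era contract in isolation; my_priv, my_clean_rx and my_interrupt are placeholders rather than pcnet32 symbols, and the RX stub does no real work.

#include <linux/netdevice.h>
#include <linux/interrupt.h>

struct my_priv {
	struct net_device *dev;
	struct napi_struct napi;
};

static int my_clean_rx(struct my_priv *priv, int budget)
{
	/* placeholder: process up to @budget RX descriptors */
	return 0;
}

static int my_poll(struct napi_struct *napi, int budget)
{
	struct my_priv *priv = container_of(napi, struct my_priv, napi);
	int work_done = my_clean_rx(priv, budget);

	if (work_done < budget) {
		/* caught up: leave polled mode, re-enable device IRQs */
		netif_rx_complete(priv->dev, napi);
		/* ...unmask device interrupts here... */
	}
	return work_done;
}

static irqreturn_t my_interrupt(int irq, void *dev_id)
{
	struct my_priv *priv = dev_id;

	if (netif_rx_schedule_prep(priv->dev, &priv->napi)) {
		/* ...mask device interrupts here... */
		__netif_rx_schedule(priv->dev, &priv->napi);
	}
	return IRQ_HANDLED;
}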
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index 3c18bb594957..45cc2914d347 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -547,7 +547,7 @@ static void phy_force_reduction(struct phy_device *phydev)
547 * Must not be called from interrupt context, or while the 547 * Must not be called from interrupt context, or while the
548 * phydev->lock is held. 548 * phydev->lock is held.
549 */ 549 */
550void phy_error(struct phy_device *phydev) 550static void phy_error(struct phy_device *phydev)
551{ 551{
552 mutex_lock(&phydev->lock); 552 mutex_lock(&phydev->lock);
553 phydev->state = PHY_HALTED; 553 phydev->state = PHY_HALTED;
diff --git a/drivers/net/ppp_generic.c b/drivers/net/ppp_generic.c
index d3207c0da895..1f4ca2b54a73 100644
--- a/drivers/net/ppp_generic.c
+++ b/drivers/net/ppp_generic.c
@@ -2458,6 +2458,7 @@ ppp_create_interface(int unit, int *retp)
2458 2458
2459out3: 2459out3:
2460 atomic_dec(&ppp_unit_count); 2460 atomic_dec(&ppp_unit_count);
2461 unregister_netdev(dev);
2461out2: 2462out2:
2462 mutex_unlock(&all_ppp_mutex); 2463 mutex_unlock(&all_ppp_mutex);
2463 free_netdev(dev); 2464 free_netdev(dev);
diff --git a/drivers/net/pppol2tp.c b/drivers/net/pppol2tp.c
index 244d7830c92a..79359919335b 100644
--- a/drivers/net/pppol2tp.c
+++ b/drivers/net/pppol2tp.c
@@ -1621,9 +1621,16 @@ out_no_ppp:
1621end: 1621end:
1622 release_sock(sk); 1622 release_sock(sk);
1623 1623
1624 if (error != 0) 1624 if (error != 0) {
1625 PRINTK(session ? session->debug : -1, PPPOL2TP_MSG_CONTROL, KERN_WARNING, 1625 if (session)
1626 "%s: connect failed: %d\n", session->name, error); 1626 PRINTK(session->debug,
1627 PPPOL2TP_MSG_CONTROL, KERN_WARNING,
1628 "%s: connect failed: %d\n",
1629 session->name, error);
1630 else
1631 PRINTK(-1, PPPOL2TP_MSG_CONTROL, KERN_WARNING,
1632 "connect failed: %d\n", error);
1633 }
1627 1634
1628 return error; 1635 return error;
1629} 1636}
diff --git a/drivers/net/ps3_gelic_wireless.c b/drivers/net/ps3_gelic_wireless.c
index 0d32123085e9..1dae1f2ed813 100644
--- a/drivers/net/ps3_gelic_wireless.c
+++ b/drivers/net/ps3_gelic_wireless.c
@@ -2474,6 +2474,8 @@ static void gelic_wl_free(struct gelic_wl_info *wl)
2474 2474
2475 pr_debug("%s: <-\n", __func__); 2475 pr_debug("%s: <-\n", __func__);
2476 2476
2477 free_page((unsigned long)wl->buf);
2478
2477 pr_debug("%s: destroy queues\n", __func__); 2479 pr_debug("%s: destroy queues\n", __func__);
2478 destroy_workqueue(wl->eurus_cmd_queue); 2480 destroy_workqueue(wl->eurus_cmd_queue);
2479 destroy_workqueue(wl->event_queue); 2481 destroy_workqueue(wl->event_queue);
diff --git a/drivers/net/sfc/Makefile b/drivers/net/sfc/Makefile
index 0f023447eafd..1d2daeec7ac1 100644
--- a/drivers/net/sfc/Makefile
+++ b/drivers/net/sfc/Makefile
@@ -1,5 +1,5 @@
1sfc-y += efx.o falcon.o tx.o rx.o falcon_xmac.o \ 1sfc-y += efx.o falcon.o tx.o rx.o falcon_xmac.o \
2 i2c-direct.o ethtool.o xfp_phy.o mdio_10g.o \ 2 i2c-direct.o selftest.o ethtool.o xfp_phy.o \
3 tenxpress.o boards.o sfe4001.o 3 mdio_10g.o tenxpress.o boards.o sfe4001.o
4 4
5obj-$(CONFIG_SFC) += sfc.o 5obj-$(CONFIG_SFC) += sfc.o
diff --git a/drivers/net/sfc/boards.h b/drivers/net/sfc/boards.h
index f56341d428e1..695764dc2e64 100644
--- a/drivers/net/sfc/boards.h
+++ b/drivers/net/sfc/boards.h
@@ -22,5 +22,7 @@ enum efx_board_type {
22extern int efx_set_board_info(struct efx_nic *efx, u16 revision_info); 22extern int efx_set_board_info(struct efx_nic *efx, u16 revision_info);
23extern int sfe4001_poweron(struct efx_nic *efx); 23extern int sfe4001_poweron(struct efx_nic *efx);
24extern void sfe4001_poweroff(struct efx_nic *efx); 24extern void sfe4001_poweroff(struct efx_nic *efx);
25/* Are we putting the PHY into flash config mode */
26extern unsigned int sfe4001_phy_flash_cfg;
25 27
26#endif 28#endif
diff --git a/drivers/net/sfc/efx.c b/drivers/net/sfc/efx.c
index 59edcf793c19..418f2e53a95b 100644
--- a/drivers/net/sfc/efx.c
+++ b/drivers/net/sfc/efx.c
@@ -1873,6 +1873,7 @@ static int efx_init_struct(struct efx_nic *efx, struct efx_nic_type *type,
1873 tx_queue->queue = i; 1873 tx_queue->queue = i;
1874 tx_queue->buffer = NULL; 1874 tx_queue->buffer = NULL;
1875 tx_queue->channel = &efx->channel[0]; /* for safety */ 1875 tx_queue->channel = &efx->channel[0]; /* for safety */
1876 tx_queue->tso_headers_free = NULL;
1876 } 1877 }
1877 for (i = 0; i < EFX_MAX_RX_QUEUES; i++) { 1878 for (i = 0; i < EFX_MAX_RX_QUEUES; i++) {
1878 rx_queue = &efx->rx_queue[i]; 1879 rx_queue = &efx->rx_queue[i];
@@ -2071,7 +2072,8 @@ static int __devinit efx_pci_probe(struct pci_dev *pci_dev,
2071 net_dev = alloc_etherdev(sizeof(*efx)); 2072 net_dev = alloc_etherdev(sizeof(*efx));
2072 if (!net_dev) 2073 if (!net_dev)
2073 return -ENOMEM; 2074 return -ENOMEM;
2074 net_dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_HIGHDMA; 2075 net_dev->features |= (NETIF_F_IP_CSUM | NETIF_F_SG |
2076 NETIF_F_HIGHDMA | NETIF_F_TSO);
2075 if (lro) 2077 if (lro)
2076 net_dev->features |= NETIF_F_LRO; 2078 net_dev->features |= NETIF_F_LRO;
2077 efx = net_dev->priv; 2079 efx = net_dev->priv;
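Advertising NETIF_F_TSO next to NETIF_F_IP_CSUM and NETIF_F_SG is deliberate: TSO is only usable when the stack may also hand the device unchecksummed, scattered frames, and the efx_ethtool_set_tso()/set_tx_csum() pair added further down keeps the two offloads consistent at runtime. A toy model of that dependency; the flag values here are illustrative, not the kernel's.

#include <stdio.h>

#define F_SG      (1 << 0)
#define F_IP_CSUM (1 << 1)
#define F_TSO     (1 << 2)

static int features_valid(unsigned features)
{
	/* TSO requires both scatter/gather and TX checksum offload */
	if ((features & F_TSO) &&
	    (features & (F_SG | F_IP_CSUM)) != (F_SG | F_IP_CSUM))
		return 0;
	return 1;
}

int main(void)
{
	printf("%d\n", features_valid(F_TSO));			/* 0 */
	printf("%d\n", features_valid(F_TSO | F_SG | F_IP_CSUM));	/* 1 */
	return 0;
}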
diff --git a/drivers/net/sfc/enum.h b/drivers/net/sfc/enum.h
index 43663a4619da..c53290d08e2b 100644
--- a/drivers/net/sfc/enum.h
+++ b/drivers/net/sfc/enum.h
@@ -10,6 +10,55 @@
10#ifndef EFX_ENUM_H 10#ifndef EFX_ENUM_H
11#define EFX_ENUM_H 11#define EFX_ENUM_H
12 12
13/**
14 * enum efx_loopback_mode - loopback modes
15 * @LOOPBACK_NONE: no loopback
16 * @LOOPBACK_XGMII: loopback within MAC at XGMII level
17 * @LOOPBACK_XGXS: loopback within MAC at XGXS level
18 * @LOOPBACK_XAUI: loopback within MAC at XAUI level
19 * @LOOPBACK_PHYXS: loopback within PHY at PHYXS level
20 * @LOOPBACK_PCS: loopback within PHY at PCS level
21 * @LOOPBACK_PMAPMD: loopback within PHY at PMAPMD level
22 * @LOOPBACK_NETWORK: reflecting loopback (even further than furthest!)
23 */
24/* Please keep in order and up-to-date w.r.t the following two #defines */
25enum efx_loopback_mode {
26 LOOPBACK_NONE = 0,
27 LOOPBACK_MAC = 1,
28 LOOPBACK_XGMII = 2,
29 LOOPBACK_XGXS = 3,
30 LOOPBACK_XAUI = 4,
31 LOOPBACK_PHY = 5,
32 LOOPBACK_PHYXS = 6,
33 LOOPBACK_PCS = 7,
34 LOOPBACK_PMAPMD = 8,
35 LOOPBACK_NETWORK = 9,
36 LOOPBACK_MAX
37};
38
39#define LOOPBACK_TEST_MAX LOOPBACK_PMAPMD
40
41extern const char *efx_loopback_mode_names[];
42#define LOOPBACK_MODE_NAME(mode) \
43 STRING_TABLE_LOOKUP(mode, efx_loopback_mode)
44#define LOOPBACK_MODE(efx) \
45 LOOPBACK_MODE_NAME(efx->loopback_mode)
46
47/* These loopbacks occur within the controller */
48#define LOOPBACKS_10G_INTERNAL ((1 << LOOPBACK_XGMII)| \
49 (1 << LOOPBACK_XGXS) | \
50 (1 << LOOPBACK_XAUI))
51
52#define LOOPBACK_MASK(_efx) \
53 (1 << (_efx)->loopback_mode)
54
55#define LOOPBACK_INTERNAL(_efx) \
56 ((LOOPBACKS_10G_INTERNAL & LOOPBACK_MASK(_efx)) ? 1 : 0)
57
58#define LOOPBACK_OUT_OF(_from, _to, _mask) \
59 (((LOOPBACK_MASK(_from) & (_mask)) && \
60 ((LOOPBACK_MASK(_to) & (_mask)) == 0)) ? 1 : 0)
61
13/*****************************************************************************/ 62/*****************************************************************************/
14 63
15/** 64/**
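The new mask macros reduce every loopback question to a bit test: LOOPBACK_MASK() makes a one-hot bit from the current mode, and LOOPBACK_INTERNAL() intersects it with the MAC-internal set. A freestanding sketch of the same mechanics, with a one-field stand-in for struct efx_nic:

#include <stdio.h>

enum efx_loopback_mode {
	LOOPBACK_NONE = 0, LOOPBACK_MAC = 1, LOOPBACK_XGMII = 2,
	LOOPBACK_XGXS = 3, LOOPBACK_XAUI = 4, LOOPBACK_PHY = 5,
	LOOPBACK_PHYXS = 6, LOOPBACK_PCS = 7, LOOPBACK_PMAPMD = 8,
	LOOPBACK_NETWORK = 9, LOOPBACK_MAX
};

struct fake_efx { enum efx_loopback_mode loopback_mode; };

#define LOOPBACKS_10G_INTERNAL ((1 << LOOPBACK_XGMII) | \
				(1 << LOOPBACK_XGXS) |  \
				(1 << LOOPBACK_XAUI))
#define LOOPBACK_MASK(_efx) (1 << (_efx)->loopback_mode)
#define LOOPBACK_INTERNAL(_efx) \
	((LOOPBACKS_10G_INTERNAL & LOOPBACK_MASK(_efx)) ? 1 : 0)

int main(void)
{
	struct fake_efx efx = { .loopback_mode = LOOPBACK_XAUI };

	printf("XAUI internal? %d\n", LOOPBACK_INTERNAL(&efx));	/* 1 */
	efx.loopback_mode = LOOPBACK_PCS;
	printf("PCS internal?  %d\n", LOOPBACK_INTERNAL(&efx));	/* 0 */
	return 0;
}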
diff --git a/drivers/net/sfc/ethtool.c b/drivers/net/sfc/ethtool.c
index ad541badbd98..e2c75d101610 100644
--- a/drivers/net/sfc/ethtool.c
+++ b/drivers/net/sfc/ethtool.c
@@ -12,12 +12,26 @@
12#include <linux/ethtool.h> 12#include <linux/ethtool.h>
13#include <linux/rtnetlink.h> 13#include <linux/rtnetlink.h>
14#include "net_driver.h" 14#include "net_driver.h"
15#include "selftest.h"
15#include "efx.h" 16#include "efx.h"
16#include "ethtool.h" 17#include "ethtool.h"
17#include "falcon.h" 18#include "falcon.h"
18#include "gmii.h" 19#include "gmii.h"
19#include "mac.h" 20#include "mac.h"
20 21
22const char *efx_loopback_mode_names[] = {
23 [LOOPBACK_NONE] = "NONE",
24 [LOOPBACK_MAC] = "MAC",
25 [LOOPBACK_XGMII] = "XGMII",
26 [LOOPBACK_XGXS] = "XGXS",
27 [LOOPBACK_XAUI] = "XAUI",
28 [LOOPBACK_PHY] = "PHY",
29 [LOOPBACK_PHYXS] = "PHY(XS)",
30 [LOOPBACK_PCS] = "PHY(PCS)",
31 [LOOPBACK_PMAPMD] = "PHY(PMAPMD)",
32 [LOOPBACK_NETWORK] = "NETWORK",
33};
34
21static int efx_ethtool_set_tx_csum(struct net_device *net_dev, u32 enable); 35static int efx_ethtool_set_tx_csum(struct net_device *net_dev, u32 enable);
22 36
23struct ethtool_string { 37struct ethtool_string {
@@ -217,23 +231,179 @@ static void efx_ethtool_get_drvinfo(struct net_device *net_dev,
217 strlcpy(info->bus_info, pci_name(efx->pci_dev), sizeof(info->bus_info)); 231 strlcpy(info->bus_info, pci_name(efx->pci_dev), sizeof(info->bus_info));
218} 232}
219 233
234/**
235 * efx_fill_test - fill in an individual self-test entry
236 * @test_index: Index of the test
237 * @strings: Ethtool strings, or %NULL
238 * @data: Ethtool test results, or %NULL
239 * @test: Pointer to test result (used only if data != %NULL)
240 * @unit_format: Unit name format (e.g. "channel\%d")
241 * @unit_id: Unit id (e.g. 0 for "channel0")
 242 * @test_format: Test name format (e.g. "loopback.\%s.tx_sent")
243 * @test_id: Test id (e.g. "PHY" for "loopback.PHY.tx_sent")
244 *
245 * Fill in an individual self-test entry.
246 */
247static void efx_fill_test(unsigned int test_index,
248 struct ethtool_string *strings, u64 *data,
249 int *test, const char *unit_format, int unit_id,
250 const char *test_format, const char *test_id)
251{
252 struct ethtool_string unit_str, test_str;
253
254 /* Fill data value, if applicable */
255 if (data)
256 data[test_index] = *test;
257
258 /* Fill string, if applicable */
259 if (strings) {
260 snprintf(unit_str.name, sizeof(unit_str.name),
261 unit_format, unit_id);
262 snprintf(test_str.name, sizeof(test_str.name),
263 test_format, test_id);
264 snprintf(strings[test_index].name,
265 sizeof(strings[test_index].name),
266 "%-9s%-17s", unit_str.name, test_str.name);
267 }
268}
269
270#define EFX_PORT_NAME "port%d", 0
271#define EFX_CHANNEL_NAME(_channel) "channel%d", _channel->channel
272#define EFX_TX_QUEUE_NAME(_tx_queue) "txq%d", _tx_queue->queue
273#define EFX_RX_QUEUE_NAME(_rx_queue) "rxq%d", _rx_queue->queue
274#define EFX_LOOPBACK_NAME(_mode, _counter) \
275 "loopback.%s." _counter, LOOPBACK_MODE_NAME(mode)
276
277/**
278 * efx_fill_loopback_test - fill in a block of loopback self-test entries
279 * @efx: Efx NIC
280 * @lb_tests: Efx loopback self-test results structure
281 * @mode: Loopback test mode
282 * @test_index: Starting index of the test
283 * @strings: Ethtool strings, or %NULL
284 * @data: Ethtool test results, or %NULL
285 */
286static int efx_fill_loopback_test(struct efx_nic *efx,
287 struct efx_loopback_self_tests *lb_tests,
288 enum efx_loopback_mode mode,
289 unsigned int test_index,
290 struct ethtool_string *strings, u64 *data)
291{
292 struct efx_tx_queue *tx_queue;
293
294 efx_for_each_tx_queue(tx_queue, efx) {
295 efx_fill_test(test_index++, strings, data,
296 &lb_tests->tx_sent[tx_queue->queue],
297 EFX_TX_QUEUE_NAME(tx_queue),
298 EFX_LOOPBACK_NAME(mode, "tx_sent"));
299 efx_fill_test(test_index++, strings, data,
300 &lb_tests->tx_done[tx_queue->queue],
301 EFX_TX_QUEUE_NAME(tx_queue),
302 EFX_LOOPBACK_NAME(mode, "tx_done"));
303 }
304 efx_fill_test(test_index++, strings, data,
305 &lb_tests->rx_good,
306 EFX_PORT_NAME,
307 EFX_LOOPBACK_NAME(mode, "rx_good"));
308 efx_fill_test(test_index++, strings, data,
309 &lb_tests->rx_bad,
310 EFX_PORT_NAME,
311 EFX_LOOPBACK_NAME(mode, "rx_bad"));
312
313 return test_index;
314}
315
316/**
317 * efx_ethtool_fill_self_tests - get self-test details
318 * @efx: Efx NIC
319 * @tests: Efx self-test results structure, or %NULL
320 * @strings: Ethtool strings, or %NULL
321 * @data: Ethtool test results, or %NULL
322 */
323static int efx_ethtool_fill_self_tests(struct efx_nic *efx,
324 struct efx_self_tests *tests,
325 struct ethtool_string *strings,
326 u64 *data)
327{
328 struct efx_channel *channel;
329 unsigned int n = 0;
330 enum efx_loopback_mode mode;
331
332 /* Interrupt */
333 efx_fill_test(n++, strings, data, &tests->interrupt,
334 "core", 0, "interrupt", NULL);
335
336 /* Event queues */
337 efx_for_each_channel(channel, efx) {
338 efx_fill_test(n++, strings, data,
339 &tests->eventq_dma[channel->channel],
340 EFX_CHANNEL_NAME(channel),
341 "eventq.dma", NULL);
342 efx_fill_test(n++, strings, data,
343 &tests->eventq_int[channel->channel],
344 EFX_CHANNEL_NAME(channel),
345 "eventq.int", NULL);
346 efx_fill_test(n++, strings, data,
347 &tests->eventq_poll[channel->channel],
348 EFX_CHANNEL_NAME(channel),
349 "eventq.poll", NULL);
350 }
351
352 /* PHY presence */
353 efx_fill_test(n++, strings, data, &tests->phy_ok,
354 EFX_PORT_NAME, "phy_ok", NULL);
355
356 /* Loopback tests */
357 efx_fill_test(n++, strings, data, &tests->loopback_speed,
358 EFX_PORT_NAME, "loopback.speed", NULL);
359 efx_fill_test(n++, strings, data, &tests->loopback_full_duplex,
360 EFX_PORT_NAME, "loopback.full_duplex", NULL);
361 for (mode = LOOPBACK_NONE; mode < LOOPBACK_TEST_MAX; mode++) {
362 if (!(efx->loopback_modes & (1 << mode)))
363 continue;
364 n = efx_fill_loopback_test(efx,
365 &tests->loopback[mode], mode, n,
366 strings, data);
367 }
368
369 return n;
370}
371
220static int efx_ethtool_get_stats_count(struct net_device *net_dev) 372static int efx_ethtool_get_stats_count(struct net_device *net_dev)
221{ 373{
222 return EFX_ETHTOOL_NUM_STATS; 374 return EFX_ETHTOOL_NUM_STATS;
223} 375}
224 376
377static int efx_ethtool_self_test_count(struct net_device *net_dev)
378{
379 struct efx_nic *efx = net_dev->priv;
380
381 return efx_ethtool_fill_self_tests(efx, NULL, NULL, NULL);
382}
383
225static void efx_ethtool_get_strings(struct net_device *net_dev, 384static void efx_ethtool_get_strings(struct net_device *net_dev,
226 u32 string_set, u8 *strings) 385 u32 string_set, u8 *strings)
227{ 386{
387 struct efx_nic *efx = net_dev->priv;
228 struct ethtool_string *ethtool_strings = 388 struct ethtool_string *ethtool_strings =
229 (struct ethtool_string *)strings; 389 (struct ethtool_string *)strings;
230 int i; 390 int i;
231 391
232 if (string_set == ETH_SS_STATS) 392 switch (string_set) {
393 case ETH_SS_STATS:
233 for (i = 0; i < EFX_ETHTOOL_NUM_STATS; i++) 394 for (i = 0; i < EFX_ETHTOOL_NUM_STATS; i++)
234 strncpy(ethtool_strings[i].name, 395 strncpy(ethtool_strings[i].name,
235 efx_ethtool_stats[i].name, 396 efx_ethtool_stats[i].name,
236 sizeof(ethtool_strings[i].name)); 397 sizeof(ethtool_strings[i].name));
398 break;
399 case ETH_SS_TEST:
400 efx_ethtool_fill_self_tests(efx, NULL,
401 ethtool_strings, NULL);
402 break;
403 default:
404 /* No other string sets */
405 break;
406 }
237} 407}
238 408
239static void efx_ethtool_get_stats(struct net_device *net_dev, 409static void efx_ethtool_get_stats(struct net_device *net_dev,
@@ -272,6 +442,22 @@ static void efx_ethtool_get_stats(struct net_device *net_dev,
272 } 442 }
273} 443}
274 444
445static int efx_ethtool_set_tso(struct net_device *net_dev, u32 enable)
446{
447 int rc;
448
449 /* Our TSO requires TX checksumming, so force TX checksumming
450 * on when TSO is enabled.
451 */
452 if (enable) {
453 rc = efx_ethtool_set_tx_csum(net_dev, 1);
454 if (rc)
455 return rc;
456 }
457
458 return ethtool_op_set_tso(net_dev, enable);
459}
460
275static int efx_ethtool_set_tx_csum(struct net_device *net_dev, u32 enable) 461static int efx_ethtool_set_tx_csum(struct net_device *net_dev, u32 enable)
276{ 462{
277 struct efx_nic *efx = net_dev->priv; 463 struct efx_nic *efx = net_dev->priv;
@@ -283,6 +469,15 @@ static int efx_ethtool_set_tx_csum(struct net_device *net_dev, u32 enable)
283 469
284 efx_flush_queues(efx); 470 efx_flush_queues(efx);
285 471
472 /* Our TSO requires TX checksumming, so disable TSO when
473 * checksumming is disabled
474 */
475 if (!enable) {
476 rc = efx_ethtool_set_tso(net_dev, 0);
477 if (rc)
478 return rc;
479 }
480
286 return 0; 481 return 0;
287} 482}
288 483
@@ -305,6 +500,64 @@ static u32 efx_ethtool_get_rx_csum(struct net_device *net_dev)
305 return efx->rx_checksum_enabled; 500 return efx->rx_checksum_enabled;
306} 501}
307 502
503static void efx_ethtool_self_test(struct net_device *net_dev,
504 struct ethtool_test *test, u64 *data)
505{
506 struct efx_nic *efx = net_dev->priv;
507 struct efx_self_tests efx_tests;
508 int offline, already_up;
509 int rc;
510
511 ASSERT_RTNL();
512 if (efx->state != STATE_RUNNING) {
513 rc = -EIO;
514 goto fail1;
515 }
516
517 /* We need rx buffers and interrupts. */
518 already_up = (efx->net_dev->flags & IFF_UP);
519 if (!already_up) {
520 rc = dev_open(efx->net_dev);
521 if (rc) {
522 EFX_ERR(efx, "failed opening device.\n");
523 goto fail2;
524 }
525 }
526
527 memset(&efx_tests, 0, sizeof(efx_tests));
528 offline = (test->flags & ETH_TEST_FL_OFFLINE);
529
530 /* Perform online self tests first */
531 rc = efx_online_test(efx, &efx_tests);
532 if (rc)
533 goto out;
534
535 /* Perform offline tests only if online tests passed */
536 if (offline) {
537 /* Stop the kernel from sending packets during the test. */
538 efx_stop_queue(efx);
539 rc = efx_flush_queues(efx);
540 if (!rc)
541 rc = efx_offline_test(efx, &efx_tests,
542 efx->loopback_modes);
543 efx_wake_queue(efx);
544 }
545
546 out:
547 if (!already_up)
548 dev_close(efx->net_dev);
549
550 EFX_LOG(efx, "%s all %sline self-tests\n",
551 rc == 0 ? "passed" : "failed", offline ? "off" : "on");
552
553 fail2:
554 fail1:
555 /* Fill ethtool results structures */
556 efx_ethtool_fill_self_tests(efx, &efx_tests, NULL, data);
557 if (rc)
558 test->flags |= ETH_TEST_FL_FAILED;
559}
560
308/* Restart autonegotiation */ 561/* Restart autonegotiation */
309static int efx_ethtool_nway_reset(struct net_device *net_dev) 562static int efx_ethtool_nway_reset(struct net_device *net_dev)
310{ 563{
@@ -451,8 +704,12 @@ struct ethtool_ops efx_ethtool_ops = {
451 .set_tx_csum = efx_ethtool_set_tx_csum, 704 .set_tx_csum = efx_ethtool_set_tx_csum,
452 .get_sg = ethtool_op_get_sg, 705 .get_sg = ethtool_op_get_sg,
453 .set_sg = ethtool_op_set_sg, 706 .set_sg = ethtool_op_set_sg,
707 .get_tso = ethtool_op_get_tso,
708 .set_tso = efx_ethtool_set_tso,
454 .get_flags = ethtool_op_get_flags, 709 .get_flags = ethtool_op_get_flags,
455 .set_flags = ethtool_op_set_flags, 710 .set_flags = ethtool_op_set_flags,
711 .self_test_count = efx_ethtool_self_test_count,
712 .self_test = efx_ethtool_self_test,
456 .get_strings = efx_ethtool_get_strings, 713 .get_strings = efx_ethtool_get_strings,
457 .phys_id = efx_ethtool_phys_id, 714 .phys_id = efx_ethtool_phys_id,
458 .get_stats_count = efx_ethtool_get_stats_count, 715 .get_stats_count = efx_ethtool_get_stats_count,
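efx_fill_test() above composes each ethtool test name from a unit column and a test column padded with "%-9s%-17s", so `ethtool -t` output stays aligned. The snippet below reproduces just that formatting; the strings are examples, and ETH_GSTRING_LEN is the usual 32-byte ethtool string size.

#include <stdio.h>

#define ETH_GSTRING_LEN 32

int main(void)
{
	char name[ETH_GSTRING_LEN];

	/* e.g. unit "channel0", test "eventq.poll" */
	snprintf(name, sizeof(name), "%-9s%-17s", "channel0", "eventq.poll");
	printf("[%s]\n", name);
	snprintf(name, sizeof(name), "%-9s%-17s", "port0",
		 "loopback.PHY.tx_sent");
	printf("[%s]\n", name);
	return 0;
}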
diff --git a/drivers/net/sfc/falcon.c b/drivers/net/sfc/falcon.c
index 46db549ce580..b57cc68058c0 100644
--- a/drivers/net/sfc/falcon.c
+++ b/drivers/net/sfc/falcon.c
@@ -1129,6 +1129,7 @@ static void falcon_handle_driver_event(struct efx_channel *channel,
1129 case RX_RECOVERY_EV_DECODE: 1129 case RX_RECOVERY_EV_DECODE:
1130 EFX_ERR(efx, "channel %d seen DRIVER RX_RESET event. " 1130 EFX_ERR(efx, "channel %d seen DRIVER RX_RESET event. "
1131 "Resetting.\n", channel->channel); 1131 "Resetting.\n", channel->channel);
1132 atomic_inc(&efx->rx_reset);
1132 efx_schedule_reset(efx, 1133 efx_schedule_reset(efx,
1133 EFX_WORKAROUND_6555(efx) ? 1134 EFX_WORKAROUND_6555(efx) ?
1134 RESET_TYPE_RX_RECOVERY : 1135 RESET_TYPE_RX_RECOVERY :
@@ -1731,7 +1732,8 @@ void falcon_drain_tx_fifo(struct efx_nic *efx)
1731 efx_oword_t temp; 1732 efx_oword_t temp;
1732 int count; 1733 int count;
1733 1734
1734 if (FALCON_REV(efx) < FALCON_REV_B0) 1735 if ((FALCON_REV(efx) < FALCON_REV_B0) ||
1736 (efx->loopback_mode != LOOPBACK_NONE))
1735 return; 1737 return;
1736 1738
1737 falcon_read(efx, &temp, MAC0_CTRL_REG_KER); 1739 falcon_read(efx, &temp, MAC0_CTRL_REG_KER);
@@ -2091,6 +2093,8 @@ static int falcon_probe_phy(struct efx_nic *efx)
2091 efx->phy_type); 2093 efx->phy_type);
2092 return -1; 2094 return -1;
2093 } 2095 }
2096
2097 efx->loopback_modes = LOOPBACKS_10G_INTERNAL | efx->phy_op->loopbacks;
2094 return 0; 2098 return 0;
2095} 2099}
2096 2100
@@ -2468,14 +2472,12 @@ int falcon_probe_nic(struct efx_nic *efx)
2468 fail5: 2472 fail5:
2469 falcon_free_buffer(efx, &efx->irq_status); 2473 falcon_free_buffer(efx, &efx->irq_status);
2470 fail4: 2474 fail4:
2471 /* fall-thru */
2472 fail3: 2475 fail3:
2473 if (nic_data->pci_dev2) { 2476 if (nic_data->pci_dev2) {
2474 pci_dev_put(nic_data->pci_dev2); 2477 pci_dev_put(nic_data->pci_dev2);
2475 nic_data->pci_dev2 = NULL; 2478 nic_data->pci_dev2 = NULL;
2476 } 2479 }
2477 fail2: 2480 fail2:
2478 /* fall-thru */
2479 fail1: 2481 fail1:
2480 kfree(efx->nic_data); 2482 kfree(efx->nic_data);
2481 return rc; 2483 return rc;
diff --git a/drivers/net/sfc/falcon_hwdefs.h b/drivers/net/sfc/falcon_hwdefs.h
index 0485a63eaff6..06e2d68fc3d1 100644
--- a/drivers/net/sfc/falcon_hwdefs.h
+++ b/drivers/net/sfc/falcon_hwdefs.h
@@ -636,6 +636,14 @@
636#define XX_HIDRVA_WIDTH 1 636#define XX_HIDRVA_WIDTH 1
637#define XX_LODRVA_LBN 8 637#define XX_LODRVA_LBN 8
638#define XX_LODRVA_WIDTH 1 638#define XX_LODRVA_WIDTH 1
639#define XX_LPBKD_LBN 3
640#define XX_LPBKD_WIDTH 1
641#define XX_LPBKC_LBN 2
642#define XX_LPBKC_WIDTH 1
643#define XX_LPBKB_LBN 1
644#define XX_LPBKB_WIDTH 1
645#define XX_LPBKA_LBN 0
646#define XX_LPBKA_WIDTH 1
639 647
640#define XX_TXDRV_CTL_REG_MAC 0x12 648#define XX_TXDRV_CTL_REG_MAC 0x12
641#define XX_DEQD_LBN 28 649#define XX_DEQD_LBN 28
@@ -656,8 +664,14 @@
656#define XX_DTXA_WIDTH 4 664#define XX_DTXA_WIDTH 4
657 665
658/* XAUI XGXS core status register */ 666/* XAUI XGXS core status register */
659#define XX_FORCE_SIG_DECODE_FORCED 0xff
660#define XX_CORE_STAT_REG_MAC 0x16 667#define XX_CORE_STAT_REG_MAC 0x16
668#define XX_FORCE_SIG_LBN 24
669#define XX_FORCE_SIG_WIDTH 8
670#define XX_FORCE_SIG_DECODE_FORCED 0xff
671#define XX_XGXS_LB_EN_LBN 23
672#define XX_XGXS_LB_EN_WIDTH 1
673#define XX_XGMII_LB_EN_LBN 22
674#define XX_XGMII_LB_EN_WIDTH 1
661#define XX_ALIGN_DONE_LBN 20 675#define XX_ALIGN_DONE_LBN 20
662#define XX_ALIGN_DONE_WIDTH 1 676#define XX_ALIGN_DONE_WIDTH 1
663#define XX_SYNC_STAT_LBN 16 677#define XX_SYNC_STAT_LBN 16
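The new XX_LPBK*/XX_*_LB_EN definitions follow sfc's register convention: _LBN is a field's lowest bit number and _WIDTH its size in bits, which is exactly what EFX_DWORD_FIELD()/EFX_SET_DWORD_FIELD() consume in falcon_xmac.c below. A generic sketch of that extract/insert arithmetic; get_field/set_field are hypothetical stand-ins for the driver's macros, and widths are assumed to be less than 32.

#include <stdint.h>
#include <stdio.h>

#define XX_XGXS_LB_EN_LBN 23
#define XX_XGXS_LB_EN_WIDTH 1

static uint32_t get_field(uint32_t reg, int lbn, int width)
{
	return (reg >> lbn) & ((1u << width) - 1);
}

static uint32_t set_field(uint32_t reg, int lbn, int width, uint32_t val)
{
	uint32_t mask = ((1u << width) - 1) << lbn;

	return (reg & ~mask) | ((val << lbn) & mask);
}

int main(void)
{
	uint32_t reg = 0;

	reg = set_field(reg, XX_XGXS_LB_EN_LBN, XX_XGXS_LB_EN_WIDTH, 1);
	printf("reg=0x%08x lb_en=%u\n", reg,
	       get_field(reg, XX_XGXS_LB_EN_LBN, XX_XGXS_LB_EN_WIDTH));
	return 0;
}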
diff --git a/drivers/net/sfc/falcon_xmac.c b/drivers/net/sfc/falcon_xmac.c
index aa7521b24a5d..a74b7931a3c4 100644
--- a/drivers/net/sfc/falcon_xmac.c
+++ b/drivers/net/sfc/falcon_xmac.c
@@ -32,7 +32,7 @@
32 (FALCON_XMAC_REGBANK + ((mac_reg) * FALCON_XMAC_REG_SIZE)) 32 (FALCON_XMAC_REGBANK + ((mac_reg) * FALCON_XMAC_REG_SIZE))
33 33
34void falcon_xmac_writel(struct efx_nic *efx, 34void falcon_xmac_writel(struct efx_nic *efx,
35 efx_dword_t *value, unsigned int mac_reg) 35 efx_dword_t *value, unsigned int mac_reg)
36{ 36{
37 efx_oword_t temp; 37 efx_oword_t temp;
38 38
@@ -69,6 +69,10 @@ static int falcon_reset_xmac(struct efx_nic *efx)
69 udelay(10); 69 udelay(10);
70 } 70 }
71 71
72 /* This often fails when DSP is disabled, ignore it */
73 if (sfe4001_phy_flash_cfg != 0)
74 return 0;
75
72 EFX_ERR(efx, "timed out waiting for XMAC core reset\n"); 76 EFX_ERR(efx, "timed out waiting for XMAC core reset\n");
73 return -ETIMEDOUT; 77 return -ETIMEDOUT;
74} 78}
@@ -223,7 +227,7 @@ static int falcon_xgmii_status(struct efx_nic *efx)
223 /* The ISR latches, so clear it and re-read */ 227 /* The ISR latches, so clear it and re-read */
224 falcon_xmac_readl(efx, &reg, XM_MGT_INT_REG_MAC_B0); 228 falcon_xmac_readl(efx, &reg, XM_MGT_INT_REG_MAC_B0);
225 falcon_xmac_readl(efx, &reg, XM_MGT_INT_REG_MAC_B0); 229 falcon_xmac_readl(efx, &reg, XM_MGT_INT_REG_MAC_B0);
226 230
227 if (EFX_DWORD_FIELD(reg, XM_LCLFLT) || 231 if (EFX_DWORD_FIELD(reg, XM_LCLFLT) ||
228 EFX_DWORD_FIELD(reg, XM_RMTFLT)) { 232 EFX_DWORD_FIELD(reg, XM_RMTFLT)) {
229 EFX_INFO(efx, "MGT_INT: "EFX_DWORD_FMT"\n", EFX_DWORD_VAL(reg)); 233 EFX_INFO(efx, "MGT_INT: "EFX_DWORD_FMT"\n", EFX_DWORD_VAL(reg));
@@ -237,7 +241,7 @@ static void falcon_mask_status_intr(struct efx_nic *efx, int enable)
237{ 241{
238 efx_dword_t reg; 242 efx_dword_t reg;
239 243
240 if (FALCON_REV(efx) < FALCON_REV_B0) 244 if ((FALCON_REV(efx) < FALCON_REV_B0) || LOOPBACK_INTERNAL(efx))
241 return; 245 return;
242 246
243 /* Flush the ISR */ 247 /* Flush the ISR */
@@ -284,6 +288,9 @@ int falcon_xaui_link_ok(struct efx_nic *efx)
284 efx_dword_t reg; 288 efx_dword_t reg;
285 int align_done, sync_status, link_ok = 0; 289 int align_done, sync_status, link_ok = 0;
286 290
291 if (LOOPBACK_INTERNAL(efx))
292 return 1;
293
287 /* Read link status */ 294 /* Read link status */
288 falcon_xmac_readl(efx, &reg, XX_CORE_STAT_REG_MAC); 295 falcon_xmac_readl(efx, &reg, XX_CORE_STAT_REG_MAC);
289 296
@@ -374,6 +381,61 @@ static void falcon_reconfigure_xmac_core(struct efx_nic *efx)
374 falcon_xmac_writel(efx, &reg, XM_ADR_HI_REG_MAC); 381 falcon_xmac_writel(efx, &reg, XM_ADR_HI_REG_MAC);
375} 382}
376 383
384static void falcon_reconfigure_xgxs_core(struct efx_nic *efx)
385{
386 efx_dword_t reg;
387 int xgxs_loopback = (efx->loopback_mode == LOOPBACK_XGXS) ? 1 : 0;
388 int xaui_loopback = (efx->loopback_mode == LOOPBACK_XAUI) ? 1 : 0;
389 int xgmii_loopback =
390 (efx->loopback_mode == LOOPBACK_XGMII) ? 1 : 0;
391
 392 /* XGXS block is flaky and will need to be reset if moving
 393 * into or out of XGMII, XGXS or XAUI loopbacks. */
394 if (EFX_WORKAROUND_5147(efx)) {
395 int old_xgmii_loopback, old_xgxs_loopback, old_xaui_loopback;
396 int reset_xgxs;
397
398 falcon_xmac_readl(efx, &reg, XX_CORE_STAT_REG_MAC);
399 old_xgxs_loopback = EFX_DWORD_FIELD(reg, XX_XGXS_LB_EN);
400 old_xgmii_loopback = EFX_DWORD_FIELD(reg, XX_XGMII_LB_EN);
401
402 falcon_xmac_readl(efx, &reg, XX_SD_CTL_REG_MAC);
403 old_xaui_loopback = EFX_DWORD_FIELD(reg, XX_LPBKA);
404
405 /* The PHY driver may have turned XAUI off */
406 reset_xgxs = ((xgxs_loopback != old_xgxs_loopback) ||
407 (xaui_loopback != old_xaui_loopback) ||
408 (xgmii_loopback != old_xgmii_loopback));
409 if (reset_xgxs) {
410 falcon_xmac_readl(efx, &reg, XX_PWR_RST_REG_MAC);
411 EFX_SET_DWORD_FIELD(reg, XX_RSTXGXSTX_EN, 1);
412 EFX_SET_DWORD_FIELD(reg, XX_RSTXGXSRX_EN, 1);
413 falcon_xmac_writel(efx, &reg, XX_PWR_RST_REG_MAC);
414 udelay(1);
415 EFX_SET_DWORD_FIELD(reg, XX_RSTXGXSTX_EN, 0);
416 EFX_SET_DWORD_FIELD(reg, XX_RSTXGXSRX_EN, 0);
417 falcon_xmac_writel(efx, &reg, XX_PWR_RST_REG_MAC);
418 udelay(1);
419 }
420 }
421
422 falcon_xmac_readl(efx, &reg, XX_CORE_STAT_REG_MAC);
423 EFX_SET_DWORD_FIELD(reg, XX_FORCE_SIG,
424 (xgxs_loopback || xaui_loopback) ?
425 XX_FORCE_SIG_DECODE_FORCED : 0);
426 EFX_SET_DWORD_FIELD(reg, XX_XGXS_LB_EN, xgxs_loopback);
427 EFX_SET_DWORD_FIELD(reg, XX_XGMII_LB_EN, xgmii_loopback);
428 falcon_xmac_writel(efx, &reg, XX_CORE_STAT_REG_MAC);
429
430 falcon_xmac_readl(efx, &reg, XX_SD_CTL_REG_MAC);
431 EFX_SET_DWORD_FIELD(reg, XX_LPBKD, xaui_loopback);
432 EFX_SET_DWORD_FIELD(reg, XX_LPBKC, xaui_loopback);
433 EFX_SET_DWORD_FIELD(reg, XX_LPBKB, xaui_loopback);
434 EFX_SET_DWORD_FIELD(reg, XX_LPBKA, xaui_loopback);
435 falcon_xmac_writel(efx, &reg, XX_SD_CTL_REG_MAC);
436}
437
438
 377/* Retry if the Falcon side of the Falcon-Phy XAUI link fails 439
378 * to come back up. Bash it until it comes back up */ 440 * to come back up. Bash it until it comes back up */
379static int falcon_check_xaui_link_up(struct efx_nic *efx) 441static int falcon_check_xaui_link_up(struct efx_nic *efx)
@@ -382,7 +444,8 @@ static int falcon_check_xaui_link_up(struct efx_nic *efx)
382 tries = EFX_WORKAROUND_5147(efx) ? 5 : 1; 444 tries = EFX_WORKAROUND_5147(efx) ? 5 : 1;
383 max_tries = tries; 445 max_tries = tries;
384 446
385 if (efx->phy_type == PHY_TYPE_NONE) 447 if ((efx->loopback_mode == LOOPBACK_NETWORK) ||
448 (efx->phy_type == PHY_TYPE_NONE))
386 return 0; 449 return 0;
387 450
388 while (tries) { 451 while (tries) {
@@ -408,8 +471,13 @@ void falcon_reconfigure_xmac(struct efx_nic *efx)
408 falcon_mask_status_intr(efx, 0); 471 falcon_mask_status_intr(efx, 0);
409 472
410 falcon_deconfigure_mac_wrapper(efx); 473 falcon_deconfigure_mac_wrapper(efx);
474
475 efx->tx_disabled = LOOPBACK_INTERNAL(efx);
411 efx->phy_op->reconfigure(efx); 476 efx->phy_op->reconfigure(efx);
477
478 falcon_reconfigure_xgxs_core(efx);
412 falcon_reconfigure_xmac_core(efx); 479 falcon_reconfigure_xmac_core(efx);
480
413 falcon_reconfigure_mac_wrapper(efx); 481 falcon_reconfigure_mac_wrapper(efx);
414 482
415 /* Ensure XAUI link is up */ 483 /* Ensure XAUI link is up */
@@ -491,13 +559,15 @@ void falcon_update_stats_xmac(struct efx_nic *efx)
491 (mac_stats->rx_bytes - mac_stats->rx_good_bytes); 559 (mac_stats->rx_bytes - mac_stats->rx_good_bytes);
492} 560}
493 561
494#define EFX_XAUI_RETRAIN_MAX 8
495
496int falcon_check_xmac(struct efx_nic *efx) 562int falcon_check_xmac(struct efx_nic *efx)
497{ 563{
498 unsigned xaui_link_ok; 564 unsigned xaui_link_ok;
499 int rc; 565 int rc;
500 566
567 if ((efx->loopback_mode == LOOPBACK_NETWORK) ||
568 (efx->phy_type == PHY_TYPE_NONE))
569 return 0;
570
501 falcon_mask_status_intr(efx, 0); 571 falcon_mask_status_intr(efx, 0);
502 xaui_link_ok = falcon_xaui_link_ok(efx); 572 xaui_link_ok = falcon_xaui_link_ok(efx);
503 573
diff --git a/drivers/net/sfc/mdio_10g.c b/drivers/net/sfc/mdio_10g.c
index dc06bb0aa575..c4f540e93b79 100644
--- a/drivers/net/sfc/mdio_10g.c
+++ b/drivers/net/sfc/mdio_10g.c
@@ -44,6 +44,9 @@ static int mdio_clause45_check_mmd(struct efx_nic *efx, int mmd,
44 int status; 44 int status;
45 int phy_id = efx->mii.phy_id; 45 int phy_id = efx->mii.phy_id;
46 46
47 if (LOOPBACK_INTERNAL(efx))
48 return 0;
49
47 /* Read MMD STATUS2 to check it is responding. */ 50 /* Read MMD STATUS2 to check it is responding. */
48 status = mdio_clause45_read(efx, phy_id, mmd, MDIO_MMDREG_STAT2); 51 status = mdio_clause45_read(efx, phy_id, mmd, MDIO_MMDREG_STAT2);
49 if (((status >> MDIO_MMDREG_STAT2_PRESENT_LBN) & 52 if (((status >> MDIO_MMDREG_STAT2_PRESENT_LBN) &
@@ -164,6 +167,22 @@ int mdio_clause45_links_ok(struct efx_nic *efx, unsigned int mmd_mask)
164 int mmd = 0; 167 int mmd = 0;
165 int good; 168 int good;
166 169
170 /* If the port is in loopback, then we should only consider a subset
 171 * of MMDs */
172 if (LOOPBACK_INTERNAL(efx))
173 return 1;
174 else if (efx->loopback_mode == LOOPBACK_NETWORK)
175 return 0;
176 else if (efx->loopback_mode == LOOPBACK_PHYXS)
177 mmd_mask &= ~(MDIO_MMDREG_DEVS0_PHYXS |
178 MDIO_MMDREG_DEVS0_PCS |
179 MDIO_MMDREG_DEVS0_PMAPMD);
180 else if (efx->loopback_mode == LOOPBACK_PCS)
181 mmd_mask &= ~(MDIO_MMDREG_DEVS0_PCS |
182 MDIO_MMDREG_DEVS0_PMAPMD);
183 else if (efx->loopback_mode == LOOPBACK_PMAPMD)
184 mmd_mask &= ~MDIO_MMDREG_DEVS0_PMAPMD;
185
167 while (mmd_mask) { 186 while (mmd_mask) {
168 if (mmd_mask & 1) { 187 if (mmd_mask & 1) {
169 /* Double reads because link state is latched, and a 188 /* Double reads because link state is latched, and a
@@ -182,6 +201,65 @@ int mdio_clause45_links_ok(struct efx_nic *efx, unsigned int mmd_mask)
182 return ok; 201 return ok;
183} 202}
184 203
204void mdio_clause45_transmit_disable(struct efx_nic *efx)
205{
206 int phy_id = efx->mii.phy_id;
207 int ctrl1, ctrl2;
208
209 ctrl1 = ctrl2 = mdio_clause45_read(efx, phy_id, MDIO_MMD_PMAPMD,
210 MDIO_MMDREG_TXDIS);
211 if (efx->tx_disabled)
212 ctrl2 |= (1 << MDIO_MMDREG_TXDIS_GLOBAL_LBN);
213 else
214 ctrl1 &= ~(1 << MDIO_MMDREG_TXDIS_GLOBAL_LBN);
215 if (ctrl1 != ctrl2)
216 mdio_clause45_write(efx, phy_id, MDIO_MMD_PMAPMD,
217 MDIO_MMDREG_TXDIS, ctrl2);
218}
219
220void mdio_clause45_phy_reconfigure(struct efx_nic *efx)
221{
222 int phy_id = efx->mii.phy_id;
223 int ctrl1, ctrl2;
224
225 /* Handle (with debouncing) PMA/PMD loopback */
226 ctrl1 = ctrl2 = mdio_clause45_read(efx, phy_id, MDIO_MMD_PMAPMD,
227 MDIO_MMDREG_CTRL1);
228
229 if (efx->loopback_mode == LOOPBACK_PMAPMD)
230 ctrl2 |= (1 << MDIO_PMAPMD_CTRL1_LBACK_LBN);
231 else
232 ctrl2 &= ~(1 << MDIO_PMAPMD_CTRL1_LBACK_LBN);
233
234 if (ctrl1 != ctrl2)
235 mdio_clause45_write(efx, phy_id, MDIO_MMD_PMAPMD,
236 MDIO_MMDREG_CTRL1, ctrl2);
237
238 /* Handle (with debouncing) PCS loopback */
239 ctrl1 = ctrl2 = mdio_clause45_read(efx, phy_id, MDIO_MMD_PCS,
240 MDIO_MMDREG_CTRL1);
241 if (efx->loopback_mode == LOOPBACK_PCS)
242 ctrl2 |= (1 << MDIO_MMDREG_CTRL1_LBACK_LBN);
243 else
244 ctrl2 &= ~(1 << MDIO_MMDREG_CTRL1_LBACK_LBN);
245
246 if (ctrl1 != ctrl2)
247 mdio_clause45_write(efx, phy_id, MDIO_MMD_PCS,
248 MDIO_MMDREG_CTRL1, ctrl2);
249
250 /* Handle (with debouncing) PHYXS network loopback */
251 ctrl1 = ctrl2 = mdio_clause45_read(efx, phy_id, MDIO_MMD_PHYXS,
252 MDIO_MMDREG_CTRL1);
253 if (efx->loopback_mode == LOOPBACK_NETWORK)
254 ctrl2 |= (1 << MDIO_MMDREG_CTRL1_LBACK_LBN);
255 else
256 ctrl2 &= ~(1 << MDIO_MMDREG_CTRL1_LBACK_LBN);
257
258 if (ctrl1 != ctrl2)
259 mdio_clause45_write(efx, phy_id, MDIO_MMD_PHYXS,
260 MDIO_MMDREG_CTRL1, ctrl2);
261}
262
185/** 263/**
186 * mdio_clause45_get_settings - Read (some of) the PHY settings over MDIO. 264 * mdio_clause45_get_settings - Read (some of) the PHY settings over MDIO.
187 * @efx: Efx NIC 265 * @efx: Efx NIC
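All three loopback paths in the new mdio_clause45_phy_reconfigure() use the same debounce idiom: read the control register, compute the desired value, and write only when it differs, avoiding redundant (and slow) MDIO cycles. The idiom in isolation, with a fake one-register device standing in for the real MDIO accessors:

#include <stdio.h>

static int reg_shadow;	/* pretend device register */

static int mdio_read(int reg)
{
	(void)reg;
	return reg_shadow;
}

static void mdio_write(int reg, int val)
{
	(void)reg;
	reg_shadow = val;
	printf("MDIO write 0x%04x\n", val);
}

static void set_loopback_bit(int reg, int lbn, int enable)
{
	int ctrl1, ctrl2;

	ctrl1 = ctrl2 = mdio_read(reg);
	if (enable)
		ctrl2 |= (1 << lbn);
	else
		ctrl2 &= ~(1 << lbn);
	if (ctrl1 != ctrl2)	/* debounce: skip the write when unchanged */
		mdio_write(reg, ctrl2);
}

int main(void)
{
	set_loopback_bit(0, 14, 1);	/* writes */
	set_loopback_bit(0, 14, 1);	/* already set: no write */
	set_loopback_bit(0, 14, 0);	/* writes */
	return 0;
}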
diff --git a/drivers/net/sfc/mdio_10g.h b/drivers/net/sfc/mdio_10g.h
index 2214b6d820a7..cb99f3f4491c 100644
--- a/drivers/net/sfc/mdio_10g.h
+++ b/drivers/net/sfc/mdio_10g.h
@@ -44,11 +44,16 @@
44#define MDIO_MMDREG_DEVS1 (6) 44#define MDIO_MMDREG_DEVS1 (6)
45#define MDIO_MMDREG_CTRL2 (7) 45#define MDIO_MMDREG_CTRL2 (7)
46#define MDIO_MMDREG_STAT2 (8) 46#define MDIO_MMDREG_STAT2 (8)
47#define MDIO_MMDREG_TXDIS (9)
47 48
48/* Bits in MMDREG_CTRL1 */ 49/* Bits in MMDREG_CTRL1 */
49/* Reset */ 50/* Reset */
50#define MDIO_MMDREG_CTRL1_RESET_LBN (15) 51#define MDIO_MMDREG_CTRL1_RESET_LBN (15)
51#define MDIO_MMDREG_CTRL1_RESET_WIDTH (1) 52#define MDIO_MMDREG_CTRL1_RESET_WIDTH (1)
53/* Loopback */
54/* Loopback bit for WIS, PCS, PHYSX and DTEXS */
55#define MDIO_MMDREG_CTRL1_LBACK_LBN (14)
56#define MDIO_MMDREG_CTRL1_LBACK_WIDTH (1)
52 57
53/* Bits in MMDREG_STAT1 */ 58/* Bits in MMDREG_STAT1 */
54#define MDIO_MMDREG_STAT1_FAULT_LBN (7) 59#define MDIO_MMDREG_STAT1_FAULT_LBN (7)
@@ -56,6 +61,9 @@
56/* Link state */ 61/* Link state */
57#define MDIO_MMDREG_STAT1_LINK_LBN (2) 62#define MDIO_MMDREG_STAT1_LINK_LBN (2)
58#define MDIO_MMDREG_STAT1_LINK_WIDTH (1) 63#define MDIO_MMDREG_STAT1_LINK_WIDTH (1)
64/* Low power ability */
65#define MDIO_MMDREG_STAT1_LPABLE_LBN (1)
66#define MDIO_MMDREG_STAT1_LPABLE_WIDTH (1)
59 67
60/* Bits in ID reg */ 68/* Bits in ID reg */
61#define MDIO_ID_REV(_id32) (_id32 & 0xf) 69#define MDIO_ID_REV(_id32) (_id32 & 0xf)
@@ -76,6 +84,14 @@
76#define MDIO_MMDREG_STAT2_PRESENT_LBN (14) 84#define MDIO_MMDREG_STAT2_PRESENT_LBN (14)
77#define MDIO_MMDREG_STAT2_PRESENT_WIDTH (2) 85#define MDIO_MMDREG_STAT2_PRESENT_WIDTH (2)
78 86
87/* Bits in MMDREG_TXDIS */
88#define MDIO_MMDREG_TXDIS_GLOBAL_LBN (0)
89#define MDIO_MMDREG_TXDIS_GLOBAL_WIDTH (1)
90
91/* MMD-specific bits, ordered by MMD, then register */
92#define MDIO_PMAPMD_CTRL1_LBACK_LBN (0)
93#define MDIO_PMAPMD_CTRL1_LBACK_WIDTH (1)
94
79/* PMA type (4 bits) */ 95/* PMA type (4 bits) */
80#define MDIO_PMAPMD_CTRL2_10G_CX4 (0x0) 96#define MDIO_PMAPMD_CTRL2_10G_CX4 (0x0)
81#define MDIO_PMAPMD_CTRL2_10G_EW (0x1) 97#define MDIO_PMAPMD_CTRL2_10G_EW (0x1)
@@ -95,7 +111,7 @@
95#define MDIO_PMAPMD_CTRL2_10_BT (0xf) 111#define MDIO_PMAPMD_CTRL2_10_BT (0xf)
96#define MDIO_PMAPMD_CTRL2_TYPE_MASK (0xf) 112#define MDIO_PMAPMD_CTRL2_TYPE_MASK (0xf)
97 113
98/* /\* PHY XGXS lane state *\/ */ 114/* PHY XGXS lane state */
99#define MDIO_PHYXS_LANE_STATE (0x18) 115#define MDIO_PHYXS_LANE_STATE (0x18)
100#define MDIO_PHYXS_LANE_ALIGNED_LBN (12) 116#define MDIO_PHYXS_LANE_ALIGNED_LBN (12)
101 117
@@ -217,6 +233,12 @@ int mdio_clause45_check_mmds(struct efx_nic *efx,
217extern int mdio_clause45_links_ok(struct efx_nic *efx, 233extern int mdio_clause45_links_ok(struct efx_nic *efx,
218 unsigned int mmd_mask); 234 unsigned int mmd_mask);
219 235
 236/* Generic transmit disable support through PMAPMD */
237extern void mdio_clause45_transmit_disable(struct efx_nic *efx);
238
239/* Generic part of reconfigure: set/clear loopback bits */
240extern void mdio_clause45_phy_reconfigure(struct efx_nic *efx);
241
220/* Read (some of) the PHY settings over MDIO */ 242/* Read (some of) the PHY settings over MDIO */
221extern void mdio_clause45_get_settings(struct efx_nic *efx, 243extern void mdio_clause45_get_settings(struct efx_nic *efx,
222 struct ethtool_cmd *ecmd); 244 struct ethtool_cmd *ecmd);
diff --git a/drivers/net/sfc/net_driver.h b/drivers/net/sfc/net_driver.h
index c505482c2520..59f261b4171f 100644
--- a/drivers/net/sfc/net_driver.h
+++ b/drivers/net/sfc/net_driver.h
@@ -134,6 +134,8 @@ struct efx_special_buffer {
134 * Set only on the final fragment of a packet; %NULL for all other 134 * Set only on the final fragment of a packet; %NULL for all other
135 * fragments. When this fragment completes, then we can free this 135 * fragments. When this fragment completes, then we can free this
136 * skb. 136 * skb.
137 * @tsoh: The associated TSO header structure, or %NULL if this
138 * buffer is not a TSO header.
137 * @dma_addr: DMA address of the fragment. 139 * @dma_addr: DMA address of the fragment.
138 * @len: Length of this fragment. 140 * @len: Length of this fragment.
139 * This field is zero when the queue slot is empty. 141 * This field is zero when the queue slot is empty.
@@ -144,6 +146,7 @@ struct efx_special_buffer {
144 */ 146 */
145struct efx_tx_buffer { 147struct efx_tx_buffer {
146 const struct sk_buff *skb; 148 const struct sk_buff *skb;
149 struct efx_tso_header *tsoh;
147 dma_addr_t dma_addr; 150 dma_addr_t dma_addr;
148 unsigned short len; 151 unsigned short len;
149 unsigned char continuation; 152 unsigned char continuation;
@@ -187,6 +190,13 @@ struct efx_tx_buffer {
187 * variable indicates that the queue is full. This is to 190 * variable indicates that the queue is full. This is to
188 * avoid cache-line ping-pong between the xmit path and the 191 * avoid cache-line ping-pong between the xmit path and the
189 * completion path. 192 * completion path.
193 * @tso_headers_free: A list of TSO headers allocated for this TX queue
194 * that are not in use, and so available for new TSO sends. The list
195 * is protected by the TX queue lock.
196 * @tso_bursts: Number of times TSO xmit invoked by kernel
197 * @tso_long_headers: Number of packets with headers too long for standard
198 * blocks
199 * @tso_packets: Number of packets via the TSO xmit path
190 */ 200 */
191struct efx_tx_queue { 201struct efx_tx_queue {
192 /* Members which don't change on the fast path */ 202 /* Members which don't change on the fast path */
@@ -206,6 +216,10 @@ struct efx_tx_queue {
206 unsigned int insert_count ____cacheline_aligned_in_smp; 216 unsigned int insert_count ____cacheline_aligned_in_smp;
207 unsigned int write_count; 217 unsigned int write_count;
208 unsigned int old_read_count; 218 unsigned int old_read_count;
219 struct efx_tso_header *tso_headers_free;
220 unsigned int tso_bursts;
221 unsigned int tso_long_headers;
222 unsigned int tso_packets;
209}; 223};
210 224
211/** 225/**
@@ -434,6 +448,9 @@ struct efx_board {
434 struct efx_blinker blinker; 448 struct efx_blinker blinker;
435}; 449};
436 450
451#define STRING_TABLE_LOOKUP(val, member) \
452 member ## _names[val]
453
437enum efx_int_mode { 454enum efx_int_mode {
438 /* Be careful if altering to correct macro below */ 455 /* Be careful if altering to correct macro below */
439 EFX_INT_MODE_MSIX = 0, 456 EFX_INT_MODE_MSIX = 0,
@@ -506,6 +523,7 @@ enum efx_fc_type {
506 * @check_hw: Check hardware 523 * @check_hw: Check hardware
507 * @reset_xaui: Reset XAUI side of PHY for (software sequenced reset) 524 * @reset_xaui: Reset XAUI side of PHY for (software sequenced reset)
508 * @mmds: MMD presence mask 525 * @mmds: MMD presence mask
526 * @loopbacks: Supported loopback modes mask
509 */ 527 */
510struct efx_phy_operations { 528struct efx_phy_operations {
511 int (*init) (struct efx_nic *efx); 529 int (*init) (struct efx_nic *efx);
@@ -515,6 +533,7 @@ struct efx_phy_operations {
515 int (*check_hw) (struct efx_nic *efx); 533 int (*check_hw) (struct efx_nic *efx);
516 void (*reset_xaui) (struct efx_nic *efx); 534 void (*reset_xaui) (struct efx_nic *efx);
517 int mmds; 535 int mmds;
536 unsigned loopbacks;
518}; 537};
519 538
520/* 539/*
@@ -653,7 +672,6 @@ union efx_multicast_hash {
653 * @phy_op: PHY interface 672 * @phy_op: PHY interface
654 * @phy_data: PHY private data (including PHY-specific stats) 673 * @phy_data: PHY private data (including PHY-specific stats)
655 * @mii: PHY interface 674 * @mii: PHY interface
656 * @phy_powered: PHY power state
657 * @tx_disabled: PHY transmitter turned off 675 * @tx_disabled: PHY transmitter turned off
658 * @link_up: Link status 676 * @link_up: Link status
659 * @link_options: Link options (MII/GMII format) 677 * @link_options: Link options (MII/GMII format)
@@ -662,6 +680,9 @@ union efx_multicast_hash {
662 * @multicast_hash: Multicast hash table 680 * @multicast_hash: Multicast hash table
663 * @flow_control: Flow control flags - separate RX/TX so can't use link_options 681 * @flow_control: Flow control flags - separate RX/TX so can't use link_options
664 * @reconfigure_work: work item for dealing with PHY events 682 * @reconfigure_work: work item for dealing with PHY events
683 * @loopback_mode: Loopback status
684 * @loopback_modes: Supported loopback mode bitmask
685 * @loopback_selftest: Offline self-test private state
665 * 686 *
666 * The @priv field of the corresponding &struct net_device points to 687 * The @priv field of the corresponding &struct net_device points to
667 * this. 688 * this.
@@ -721,6 +742,7 @@ struct efx_nic {
721 struct efx_phy_operations *phy_op; 742 struct efx_phy_operations *phy_op;
722 void *phy_data; 743 void *phy_data;
723 struct mii_if_info mii; 744 struct mii_if_info mii;
745 unsigned tx_disabled;
724 746
725 int link_up; 747 int link_up;
726 unsigned int link_options; 748 unsigned int link_options;
@@ -732,6 +754,10 @@ struct efx_nic {
732 struct work_struct reconfigure_work; 754 struct work_struct reconfigure_work;
733 755
734 atomic_t rx_reset; 756 atomic_t rx_reset;
757 enum efx_loopback_mode loopback_mode;
758 unsigned int loopback_modes;
759
760 void *loopback_selftest;
735}; 761};
736 762
737/** 763/**
diff --git a/drivers/net/sfc/rx.c b/drivers/net/sfc/rx.c
index 551299b462ae..670622373ddf 100644
--- a/drivers/net/sfc/rx.c
+++ b/drivers/net/sfc/rx.c
@@ -19,6 +19,7 @@
19#include "rx.h" 19#include "rx.h"
20#include "efx.h" 20#include "efx.h"
21#include "falcon.h" 21#include "falcon.h"
22#include "selftest.h"
22#include "workarounds.h" 23#include "workarounds.h"
23 24
24/* Number of RX descriptors pushed at once. */ 25/* Number of RX descriptors pushed at once. */
@@ -683,6 +684,15 @@ void __efx_rx_packet(struct efx_channel *channel,
683 struct sk_buff *skb; 684 struct sk_buff *skb;
684 int lro = efx->net_dev->features & NETIF_F_LRO; 685 int lro = efx->net_dev->features & NETIF_F_LRO;
685 686
687 /* If we're in loopback test, then pass the packet directly to the
688 * loopback layer, and free the rx_buf here
689 */
690 if (unlikely(efx->loopback_selftest)) {
691 efx_loopback_rx_packet(efx, rx_buf->data, rx_buf->len);
692 efx_free_rx_buffer(efx, rx_buf);
693 goto done;
694 }
695
686 if (rx_buf->skb) { 696 if (rx_buf->skb) {
687 prefetch(skb_shinfo(rx_buf->skb)); 697 prefetch(skb_shinfo(rx_buf->skb));
688 698
@@ -736,7 +746,6 @@ void __efx_rx_packet(struct efx_channel *channel,
736 /* Update allocation strategy method */ 746 /* Update allocation strategy method */
737 channel->rx_alloc_level += RX_ALLOC_FACTOR_SKB; 747 channel->rx_alloc_level += RX_ALLOC_FACTOR_SKB;
738 748
739 /* fall-thru */
740done: 749done:
741 efx->net_dev->last_rx = jiffies; 750 efx->net_dev->last_rx = jiffies;
742} 751}
diff --git a/drivers/net/sfc/selftest.c b/drivers/net/sfc/selftest.c
new file mode 100644
index 000000000000..cbda15946e8f
--- /dev/null
+++ b/drivers/net/sfc/selftest.c
@@ -0,0 +1,717 @@
1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2005-2006 Fen Systems Ltd.
4 * Copyright 2006-2008 Solarflare Communications Inc.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published
8 * by the Free Software Foundation, incorporated herein by reference.
9 */
10
11#include <linux/netdevice.h>
12#include <linux/module.h>
13#include <linux/delay.h>
14#include <linux/kernel_stat.h>
15#include <linux/pci.h>
16#include <linux/ethtool.h>
17#include <linux/ip.h>
18#include <linux/in.h>
19#include <linux/udp.h>
20#include <linux/rtnetlink.h>
21#include <asm/io.h>
22#include "net_driver.h"
23#include "ethtool.h"
24#include "efx.h"
25#include "falcon.h"
26#include "selftest.h"
27#include "boards.h"
28#include "workarounds.h"
29#include "mac.h"
30
31/*
32 * Loopback test packet structure
33 *
34 * The self-test should stress every RSS vector, and unfortunately
35 * Falcon only performs RSS on TCP/UDP packets.
36 */
37struct efx_loopback_payload {
38 struct ethhdr header;
39 struct iphdr ip;
40 struct udphdr udp;
41 __be16 iteration;
42 const char msg[64];
43} __attribute__ ((packed));
44
45/* Loopback test source MAC address */
46static const unsigned char payload_source[ETH_ALEN] = {
47 0x00, 0x0f, 0x53, 0x1b, 0x1b, 0x1b,
48};
49
50static const char *payload_msg =
51 "Hello world! This is an Efx loopback test in progress!";
52
53/**
54 * efx_selftest_state - persistent state during a selftest
55 * @flush: Drop all packets in efx_loopback_rx_packet
56 * @packet_count: Number of packets being used in this test
57 * @skbs: An array of skbs transmitted
58 * @rx_good: RX good packet count
59 * @rx_bad: RX bad packet count
60 * @payload: Payload used in tests
61 */
62struct efx_selftest_state {
63 int flush;
64 int packet_count;
65 struct sk_buff **skbs;
66 atomic_t rx_good;
67 atomic_t rx_bad;
68 struct efx_loopback_payload payload;
69};
70
71/**************************************************************************
72 *
73 * Configurable values
74 *
75 **************************************************************************/
76
77/* Level of loopback testing
78 *
79 * The maximum packet burst length is 16**(n-1), i.e.
80 *
81 * - Level 0 : no packets
82 * - Level 1 : 1 packet
83 * - Level 2 : 17 packets (1 * 1 packet, 1 * 16 packets)
 84 * - Level 3 : 273 packets (1 * 1 packet, 1 * 16 packets, 1 * 256 packets)
85 *
86 */
87static unsigned int loopback_test_level = 3;
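As an illustrative aside (not part of the patch), the burst lengths in the comment above fall out of the packet-count arithmetic used later in efx_test_loopback(): each iteration sends min(1 << (i << 2), ring_entries / 3) packets. A standalone C sketch, assuming a 1024-entry TX ring:

	#include <stdio.h>

	int main(void)
	{
		unsigned int level = 3, ring_entries = 1024;	/* assumed ring size */
		unsigned int i, total = 0;

		for (i = 0; i < level; i++) {
			unsigned int count = 1u << (i << 2);	/* 1, 16, 256, ... */
			if (count > ring_entries / 3)
				count = ring_entries / 3;	/* cap at a third of the ring */
			total += count;
			printf("iteration %u: %u packets\n", i, count);
		}
		printf("level %u total: %u packets\n", level, total);	/* 273 */
		return 0;
	}
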
88
89/**************************************************************************
90 *
91 * Interrupt and event queue testing
92 *
93 **************************************************************************/
94
95/* Test generation and receipt of interrupts */
96static int efx_test_interrupts(struct efx_nic *efx,
97 struct efx_self_tests *tests)
98{
99 struct efx_channel *channel;
100
101 EFX_LOG(efx, "testing interrupts\n");
102 tests->interrupt = -1;
103
104 /* Reset interrupt flag */
105 efx->last_irq_cpu = -1;
106 smp_wmb();
107
108 /* ACK each interrupting event queue. Receiving an interrupt due to
109 * traffic before a test event is raised is considered a pass */
110 efx_for_each_channel_with_interrupt(channel, efx) {
111 if (channel->work_pending)
112 efx_process_channel_now(channel);
113 if (efx->last_irq_cpu >= 0)
114 goto success;
115 }
116
117 falcon_generate_interrupt(efx);
118
119 /* Wait for arrival of test interrupt. */
120 EFX_LOG(efx, "waiting for test interrupt\n");
121 schedule_timeout_uninterruptible(HZ / 10);
122 if (efx->last_irq_cpu >= 0)
123 goto success;
124
125 EFX_ERR(efx, "timed out waiting for interrupt\n");
126 return -ETIMEDOUT;
127
128 success:
129 EFX_LOG(efx, "test interrupt (mode %d) seen on CPU%d\n",
130 efx->interrupt_mode, efx->last_irq_cpu);
131 tests->interrupt = 1;
132 return 0;
133}
134
135/* Test generation and receipt of non-interrupting events */
136static int efx_test_eventq(struct efx_channel *channel,
137 struct efx_self_tests *tests)
138{
139 unsigned int magic;
140
141 /* Channel specific code, limited to 20 bits */
142 magic = (0x00010150 + channel->channel);
143 EFX_LOG(channel->efx, "channel %d testing event queue with code %x\n",
144 channel->channel, magic);
145
146 tests->eventq_dma[channel->channel] = -1;
147 tests->eventq_int[channel->channel] = 1; /* fake pass */
148 tests->eventq_poll[channel->channel] = 1; /* fake pass */
149
150 /* Reset flag and zero magic word */
151 channel->efx->last_irq_cpu = -1;
152 channel->eventq_magic = 0;
153 smp_wmb();
154
155 falcon_generate_test_event(channel, magic);
156 udelay(1);
157
158 efx_process_channel_now(channel);
159 if (channel->eventq_magic != magic) {
160 EFX_ERR(channel->efx, "channel %d failed to see test event\n",
161 channel->channel);
162 return -ETIMEDOUT;
163 } else {
164 tests->eventq_dma[channel->channel] = 1;
165 }
166
167 return 0;
168}
169
170/* Test generation and receipt of interrupting events */
171static int efx_test_eventq_irq(struct efx_channel *channel,
172 struct efx_self_tests *tests)
173{
174 unsigned int magic, count;
175
176 /* Channel specific code, limited to 20 bits */
177 magic = (0x00010150 + channel->channel);
178 EFX_LOG(channel->efx, "channel %d testing event queue with code %x\n",
179 channel->channel, magic);
180
181 tests->eventq_dma[channel->channel] = -1;
182 tests->eventq_int[channel->channel] = -1;
183 tests->eventq_poll[channel->channel] = -1;
184
185 /* Reset flag and zero magic word */
186 channel->efx->last_irq_cpu = -1;
187 channel->eventq_magic = 0;
188 smp_wmb();
189
190 falcon_generate_test_event(channel, magic);
191
192 /* Wait for arrival of interrupt */
193 count = 0;
194 do {
195 schedule_timeout_uninterruptible(HZ / 100);
196
197 if (channel->work_pending)
198 efx_process_channel_now(channel);
199
200 if (channel->eventq_magic == magic)
201 goto eventq_ok;
202 } while (++count < 2);
203
204 EFX_ERR(channel->efx, "channel %d timed out waiting for event queue\n",
205 channel->channel);
206
207 /* See if interrupt arrived */
208 if (channel->efx->last_irq_cpu >= 0) {
209 EFX_ERR(channel->efx, "channel %d saw interrupt on CPU%d "
210 "during event queue test\n", channel->channel,
211 raw_smp_processor_id());
212 tests->eventq_int[channel->channel] = 1;
213 }
214
215 /* Check to see if event was received even if interrupt wasn't */
216 efx_process_channel_now(channel);
217 if (channel->eventq_magic == magic) {
218 EFX_ERR(channel->efx, "channel %d event was generated, but "
219 "failed to trigger an interrupt\n", channel->channel);
220 tests->eventq_dma[channel->channel] = 1;
221 }
222
223 return -ETIMEDOUT;
224 eventq_ok:
225 EFX_LOG(channel->efx, "channel %d event queue passed\n",
226 channel->channel);
227 tests->eventq_dma[channel->channel] = 1;
228 tests->eventq_int[channel->channel] = 1;
229 tests->eventq_poll[channel->channel] = 1;
230 return 0;
231}
232
233/**************************************************************************
234 *
235 * PHY testing
236 *
237 **************************************************************************/
238
239/* Check PHY presence by reading the PHY ID registers */
240static int efx_test_phy(struct efx_nic *efx,
241 struct efx_self_tests *tests)
242{
243 u16 physid1, physid2;
244 struct mii_if_info *mii = &efx->mii;
245 struct net_device *net_dev = efx->net_dev;
246
247 if (efx->phy_type == PHY_TYPE_NONE)
248 return 0;
249
250 EFX_LOG(efx, "testing PHY presence\n");
251 tests->phy_ok = -1;
252
253 physid1 = mii->mdio_read(net_dev, mii->phy_id, MII_PHYSID1);
254 physid2 = mii->mdio_read(net_dev, mii->phy_id, MII_PHYSID2);
255
256 if ((physid1 != 0x0000) && (physid1 != 0xffff) &&
257 (physid2 != 0x0000) && (physid2 != 0xffff)) {
258 EFX_LOG(efx, "found MII PHY %d ID 0x%x:%x\n",
259 mii->phy_id, physid1, physid2);
260 tests->phy_ok = 1;
261 return 0;
262 }
263
264 EFX_ERR(efx, "no MII PHY present with ID %d\n", mii->phy_id);
265 return -ENODEV;
266}
267
268/**************************************************************************
269 *
270 * Loopback testing
 271 * NB Only one loopback test can run at a time.
272 *
273 **************************************************************************/
274
275/* Loopback test RX callback
276 * This is called for each received packet during loopback testing.
277 */
278void efx_loopback_rx_packet(struct efx_nic *efx,
279 const char *buf_ptr, int pkt_len)
280{
281 struct efx_selftest_state *state = efx->loopback_selftest;
282 struct efx_loopback_payload *received;
283 struct efx_loopback_payload *payload;
284
285 BUG_ON(!buf_ptr);
286
287 /* If we are just flushing, then drop the packet */
288 if ((state == NULL) || state->flush)
289 return;
290
291 payload = &state->payload;
292
293 received = (struct efx_loopback_payload *)(char *) buf_ptr;
294 received->ip.saddr = payload->ip.saddr;
295 received->ip.check = payload->ip.check;
296
297 /* Check that header exists */
298 if (pkt_len < sizeof(received->header)) {
299 EFX_ERR(efx, "saw runt RX packet (length %d) in %s loopback "
300 "test\n", pkt_len, LOOPBACK_MODE(efx));
301 goto err;
302 }
303
 304 /* Check that the ethernet header matches the payload */
305 if (memcmp(&received->header, &payload->header, ETH_HLEN) != 0) {
306 EFX_ERR(efx, "saw non-loopback RX packet in %s loopback test\n",
307 LOOPBACK_MODE(efx));
308 goto err;
309 }
310
311 /* Check packet length */
312 if (pkt_len != sizeof(*payload)) {
313 EFX_ERR(efx, "saw incorrect RX packet length %d (wanted %d) in "
314 "%s loopback test\n", pkt_len, (int)sizeof(*payload),
315 LOOPBACK_MODE(efx));
316 goto err;
317 }
318
319 /* Check that IP header matches */
320 if (memcmp(&received->ip, &payload->ip, sizeof(payload->ip)) != 0) {
321 EFX_ERR(efx, "saw corrupted IP header in %s loopback test\n",
322 LOOPBACK_MODE(efx));
323 goto err;
324 }
325
 326 /* Check that msg and padding match */
327 if (memcmp(&received->msg, &payload->msg, sizeof(received->msg)) != 0) {
328 EFX_ERR(efx, "saw corrupted RX packet in %s loopback test\n",
329 LOOPBACK_MODE(efx));
330 goto err;
331 }
332
333 /* Check that iteration matches */
334 if (received->iteration != payload->iteration) {
335 EFX_ERR(efx, "saw RX packet from iteration %d (wanted %d) in "
336 "%s loopback test\n", ntohs(received->iteration),
337 ntohs(payload->iteration), LOOPBACK_MODE(efx));
338 goto err;
339 }
340
 341 /* Increment the good RX count */
342 EFX_TRACE(efx, "got loopback RX in %s loopback test\n",
343 LOOPBACK_MODE(efx));
344
345 atomic_inc(&state->rx_good);
346 return;
347
348 err:
349#ifdef EFX_ENABLE_DEBUG
350 if (atomic_read(&state->rx_bad) == 0) {
351 EFX_ERR(efx, "received packet:\n");
352 print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 0x10, 1,
353 buf_ptr, pkt_len, 0);
354 EFX_ERR(efx, "expected packet:\n");
355 print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 0x10, 1,
356 &state->payload, sizeof(state->payload), 0);
357 }
358#endif
359 atomic_inc(&state->rx_bad);
360}
361
362/* Initialise an efx_selftest_state for a new iteration */
363static void efx_iterate_state(struct efx_nic *efx)
364{
365 struct efx_selftest_state *state = efx->loopback_selftest;
366 struct net_device *net_dev = efx->net_dev;
367 struct efx_loopback_payload *payload = &state->payload;
368
 369 /* Initialise the layer II header */
370 memcpy(&payload->header.h_dest, net_dev->dev_addr, ETH_ALEN);
371 memcpy(&payload->header.h_source, &payload_source, ETH_ALEN);
372 payload->header.h_proto = htons(ETH_P_IP);
373
374 /* saddr set later and used as incrementing count */
375 payload->ip.daddr = htonl(INADDR_LOOPBACK);
376 payload->ip.ihl = 5;
377 payload->ip.check = htons(0xdead);
378 payload->ip.tot_len = htons(sizeof(*payload) - sizeof(struct ethhdr));
379 payload->ip.version = IPVERSION;
380 payload->ip.protocol = IPPROTO_UDP;
381
382 /* Initialise udp header */
383 payload->udp.source = 0;
384 payload->udp.len = htons(sizeof(*payload) - sizeof(struct ethhdr) -
385 sizeof(struct iphdr));
386 payload->udp.check = 0; /* checksum ignored */
387
388 /* Fill out payload */
389 payload->iteration = htons(ntohs(payload->iteration) + 1);
390 memcpy(&payload->msg, payload_msg, sizeof(payload_msg));
391
392 /* Fill out remaining state members */
393 atomic_set(&state->rx_good, 0);
394 atomic_set(&state->rx_bad, 0);
395 smp_wmb();
396}
397
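The length fields set in efx_iterate_state() above can be worked through by hand for the packed efx_loopback_payload layout (illustrative aside, not part of the patch; member sizes assume no padding thanks to the packed attribute):

	#include <stdio.h>

	int main(void)
	{
		/* sizes of the packed efx_loopback_payload members, in bytes */
		unsigned eth = 14, ip = 20, udp = 8, iter = 2, msg = 64;
		unsigned total = eth + ip + udp + iter + msg;	/* 108 on the wire */

		printf("sizeof(payload) = %u\n", total);
		printf("ip.tot_len      = %u\n", total - eth);		/* 94 */
		printf("udp.len         = %u\n", total - eth - ip);	/* 74 */
		return 0;
	}
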
398static int efx_tx_loopback(struct efx_tx_queue *tx_queue)
399{
400 struct efx_nic *efx = tx_queue->efx;
401 struct efx_selftest_state *state = efx->loopback_selftest;
402 struct efx_loopback_payload *payload;
403 struct sk_buff *skb;
404 int i, rc;
405
406 /* Transmit N copies of buffer */
407 for (i = 0; i < state->packet_count; i++) {
408 /* Allocate an skb, holding an extra reference for
409 * transmit completion counting */
410 skb = alloc_skb(sizeof(state->payload), GFP_KERNEL);
411 if (!skb)
412 return -ENOMEM;
413 state->skbs[i] = skb;
414 skb_get(skb);
415
416 /* Copy the payload in, incrementing the source address to
 417 * exercise the RSS vectors */
418 payload = ((struct efx_loopback_payload *)
419 skb_put(skb, sizeof(state->payload)));
420 memcpy(payload, &state->payload, sizeof(state->payload));
421 payload->ip.saddr = htonl(INADDR_LOOPBACK | (i << 2));
422
423 /* Ensure everything we've written is visible to the
424 * interrupt handler. */
425 smp_wmb();
426
427 if (NET_DEV_REGISTERED(efx))
428 netif_tx_lock_bh(efx->net_dev);
429 rc = efx_xmit(efx, tx_queue, skb);
430 if (NET_DEV_REGISTERED(efx))
431 netif_tx_unlock_bh(efx->net_dev);
432
433 if (rc != NETDEV_TX_OK) {
434 EFX_ERR(efx, "TX queue %d could not transmit packet %d "
435 "of %d in %s loopback test\n", tx_queue->queue,
436 i + 1, state->packet_count, LOOPBACK_MODE(efx));
437
438 /* Defer cleaning up the other skbs for the caller */
439 kfree_skb(skb);
440 return -EPIPE;
441 }
442 }
443
444 return 0;
445}
446
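The extra reference taken with skb_get() in efx_tx_loopback() is what lets efx_rx_loopback() below count TX completions: once the completion path drops its reference, skb_shared() becomes false. A standalone C sketch of the reference-counting trick (illustrative, not part of the patch; a plain struct stands in for the skb):

	#include <stdio.h>

	struct fake_skb { int users; };

	int main(void)
	{
		struct fake_skb skb = { .users = 1 };	/* alloc_skb() */

		skb.users++;	/* skb_get() before transmit */
		printf("shared after xmit? %s\n", skb.users > 1 ? "yes" : "no");
		skb.users--;	/* TX completion frees its reference */
		printf("shared after completion? %s\n", skb.users > 1 ? "yes" : "no");
		return 0;
	}
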
447static int efx_rx_loopback(struct efx_tx_queue *tx_queue,
448 struct efx_loopback_self_tests *lb_tests)
449{
450 struct efx_nic *efx = tx_queue->efx;
451 struct efx_selftest_state *state = efx->loopback_selftest;
452 struct sk_buff *skb;
453 int tx_done = 0, rx_good, rx_bad;
454 int i, rc = 0;
455
456 if (NET_DEV_REGISTERED(efx))
457 netif_tx_lock_bh(efx->net_dev);
458
459 /* Count the number of tx completions, and decrement the refcnt. Any
 460 * skbs not already completed will be freed when the queue is flushed */
 461 for (i = 0; i < state->packet_count; i++) {
462 skb = state->skbs[i];
463 if (skb && !skb_shared(skb))
464 ++tx_done;
465 dev_kfree_skb_any(skb);
466 }
467
468 if (NET_DEV_REGISTERED(efx))
469 netif_tx_unlock_bh(efx->net_dev);
470
471 /* Check TX completion and received packet counts */
472 rx_good = atomic_read(&state->rx_good);
473 rx_bad = atomic_read(&state->rx_bad);
474 if (tx_done != state->packet_count) {
475 /* Don't free the skbs; they will be picked up on TX
476 * overflow or channel teardown.
477 */
478 EFX_ERR(efx, "TX queue %d saw only %d out of an expected %d "
479 "TX completion events in %s loopback test\n",
480 tx_queue->queue, tx_done, state->packet_count,
481 LOOPBACK_MODE(efx));
482 rc = -ETIMEDOUT;
 483 /* Fall through so we see the RX errors as well */
484 }
485
486 /* We may always be up to a flush away from our desired packet total */
487 if (rx_good != state->packet_count) {
488 EFX_LOG(efx, "TX queue %d saw only %d out of an expected %d "
489 "received packets in %s loopback test\n",
490 tx_queue->queue, rx_good, state->packet_count,
491 LOOPBACK_MODE(efx));
492 rc = -ETIMEDOUT;
493 /* Fall through */
494 }
495
496 /* Update loopback test structure */
497 lb_tests->tx_sent[tx_queue->queue] += state->packet_count;
498 lb_tests->tx_done[tx_queue->queue] += tx_done;
499 lb_tests->rx_good += rx_good;
500 lb_tests->rx_bad += rx_bad;
501
502 return rc;
503}
504
505static int
506efx_test_loopback(struct efx_tx_queue *tx_queue,
507 struct efx_loopback_self_tests *lb_tests)
508{
509 struct efx_nic *efx = tx_queue->efx;
510 struct efx_selftest_state *state = efx->loopback_selftest;
511 struct efx_channel *channel;
512 int i, rc = 0;
513
514 for (i = 0; i < loopback_test_level; i++) {
515 /* Determine how many packets to send */
516 state->packet_count = (efx->type->txd_ring_mask + 1) / 3;
517 state->packet_count = min(1 << (i << 2), state->packet_count);
518 state->skbs = kzalloc(sizeof(state->skbs[0]) *
519 state->packet_count, GFP_KERNEL);
520 state->flush = 0;
521
522 EFX_LOG(efx, "TX queue %d testing %s loopback with %d "
523 "packets\n", tx_queue->queue, LOOPBACK_MODE(efx),
524 state->packet_count);
525
526 efx_iterate_state(efx);
527 rc = efx_tx_loopback(tx_queue);
528
529 /* NAPI polling is not enabled, so process channels synchronously */
530 schedule_timeout_uninterruptible(HZ / 50);
531 efx_for_each_channel_with_interrupt(channel, efx) {
532 if (channel->work_pending)
533 efx_process_channel_now(channel);
534 }
535
536 rc |= efx_rx_loopback(tx_queue, lb_tests);
537 kfree(state->skbs);
538
539 if (rc) {
540 /* Wait a while to ensure there are no packets
541 * floating around after a failure. */
542 schedule_timeout_uninterruptible(HZ / 10);
543 return rc;
544 }
545 }
546
547 EFX_LOG(efx, "TX queue %d passed %s loopback test with a burst length "
548 "of %d packets\n", tx_queue->queue, LOOPBACK_MODE(efx),
549 state->packet_count);
550
551 return rc;
552}
553
554static int efx_test_loopbacks(struct efx_nic *efx,
555 struct efx_self_tests *tests,
556 unsigned int loopback_modes)
557{
558 struct efx_selftest_state *state = efx->loopback_selftest;
559 struct ethtool_cmd ecmd, ecmd_loopback;
560 struct efx_tx_queue *tx_queue;
561 enum efx_loopback_mode old_mode, mode;
562 int count, rc = 0, link_up;
563
564 rc = efx_ethtool_get_settings(efx->net_dev, &ecmd);
565 if (rc) {
566 EFX_ERR(efx, "could not get GMII settings\n");
567 return rc;
568 }
569 old_mode = efx->loopback_mode;
570
571 /* Disable autonegotiation for the purposes of loopback */
572 memcpy(&ecmd_loopback, &ecmd, sizeof(ecmd_loopback));
573 if (ecmd_loopback.autoneg == AUTONEG_ENABLE) {
574 ecmd_loopback.autoneg = AUTONEG_DISABLE;
575 ecmd_loopback.duplex = DUPLEX_FULL;
576 ecmd_loopback.speed = SPEED_10000;
577 }
578
579 rc = efx_ethtool_set_settings(efx->net_dev, &ecmd_loopback);
580 if (rc) {
581 EFX_ERR(efx, "could not disable autonegotiation\n");
582 goto out;
583 }
584 tests->loopback_speed = ecmd_loopback.speed;
585 tests->loopback_full_duplex = ecmd_loopback.duplex;
586
587 /* Test all supported loopback modes */
588 for (mode = LOOPBACK_NONE; mode < LOOPBACK_TEST_MAX; mode++) {
589 if (!(loopback_modes & (1 << mode)))
590 continue;
591
592 /* Move the port into the specified loopback mode. */
593 state->flush = 1;
594 efx->loopback_mode = mode;
595 efx_reconfigure_port(efx);
596
597 /* Wait for the PHY to signal the link is up */
598 count = 0;
599 do {
600 struct efx_channel *channel = &efx->channel[0];
601
602 falcon_check_xmac(efx);
603 schedule_timeout_uninterruptible(HZ / 10);
604 if (channel->work_pending)
605 efx_process_channel_now(channel);
606 /* Wait for PHY events to be processed */
607 flush_workqueue(efx->workqueue);
608 rmb();
609
 610 /* efx->link_up can be 1 even if the XAUI link is down
 611 * (bug 5762). Usually, it's not worth bothering with the
612 * difference, but for selftests, we need that extra
613 * guarantee that the link is really, really, up.
614 */
615 link_up = efx->link_up;
616 if (!falcon_xaui_link_ok(efx))
617 link_up = 0;
618
619 } while ((++count < 20) && !link_up);
620
621 /* The link should now be up. If it isn't, there is no point
622 * in attempting a loopback test */
623 if (!link_up) {
624 EFX_ERR(efx, "loopback %s never came up\n",
625 LOOPBACK_MODE(efx));
626 rc = -EIO;
627 goto out;
628 }
629
630 EFX_LOG(efx, "link came up in %s loopback in %d iterations\n",
631 LOOPBACK_MODE(efx), count);
632
633 /* Test every TX queue */
634 efx_for_each_tx_queue(tx_queue, efx) {
635 rc |= efx_test_loopback(tx_queue,
636 &tests->loopback[mode]);
637 if (rc)
638 goto out;
639 }
640 }
641
642 out:
643 /* Take out of loopback and restore PHY settings */
644 state->flush = 1;
645 efx->loopback_mode = old_mode;
646 efx_ethtool_set_settings(efx->net_dev, &ecmd);
647
648 return rc;
649}
650
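The mode loop in efx_test_loopbacks() above walks a supported-modes bitmask, skipping any mode the PHY does not advertise. A standalone C sketch of that iteration pattern (illustrative, not part of the patch; the enum values are hypothetical placeholders for the driver's efx_loopback_mode values):

	#include <stdio.h>

	enum { LB_NONE, LB_XGMII, LB_XGXS, LB_XAUI, LB_MAX };	/* illustrative */

	int main(void)
	{
		unsigned modes = (1u << LB_XGXS) | (1u << LB_XAUI);	/* supported mask */
		int mode;

		for (mode = LB_NONE; mode < LB_MAX; mode++) {
			if (!(modes & (1u << mode)))
				continue;	/* skip unsupported modes, as above */
			printf("testing loopback mode %d\n", mode);
		}
		return 0;
	}
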
651/**************************************************************************
652 *
653 * Entry points
654 *
655 *************************************************************************/
656
657/* Online (i.e. non-disruptive) testing
658 * This checks interrupt generation, event delivery and PHY presence. */
659int efx_online_test(struct efx_nic *efx, struct efx_self_tests *tests)
660{
661 struct efx_channel *channel;
662 int rc = 0;
663
664 EFX_LOG(efx, "performing online self-tests\n");
665
666 rc |= efx_test_interrupts(efx, tests);
667 efx_for_each_channel(channel, efx) {
668 if (channel->has_interrupt)
669 rc |= efx_test_eventq_irq(channel, tests);
670 else
671 rc |= efx_test_eventq(channel, tests);
672 }
673 rc |= efx_test_phy(efx, tests);
674
675 if (rc)
676 EFX_ERR(efx, "failed online self-tests\n");
677
678 return rc;
679}
680
681/* Offline (i.e. disruptive) testing
682 * This checks MAC and PHY loopback on the specified port. */
683int efx_offline_test(struct efx_nic *efx,
684 struct efx_self_tests *tests, unsigned int loopback_modes)
685{
686 struct efx_selftest_state *state;
687 int rc = 0;
688
689 EFX_LOG(efx, "performing offline self-tests\n");
690
691 /* Create a selftest_state structure to hold state for the test */
692 state = kzalloc(sizeof(*state), GFP_KERNEL);
693 if (state == NULL) {
694 rc = -ENOMEM;
695 goto out;
696 }
697
698 /* Set the port loopback_selftest member. From this point on
699 * all received packets will be dropped. Mark the state as
700 * "flushing" so all inflight packets are dropped */
701 BUG_ON(efx->loopback_selftest);
702 state->flush = 1;
703 efx->loopback_selftest = (void *)state;
704
705 rc = efx_test_loopbacks(efx, tests, loopback_modes);
706
707 efx->loopback_selftest = NULL;
708 wmb();
709 kfree(state);
710
711 out:
712 if (rc)
713 EFX_ERR(efx, "failed offline self-tests\n");
714
715 return rc;
716}
717
diff --git a/drivers/net/sfc/selftest.h b/drivers/net/sfc/selftest.h
new file mode 100644
index 000000000000..f6999c2b622d
--- /dev/null
+++ b/drivers/net/sfc/selftest.h
@@ -0,0 +1,50 @@
1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2005-2006 Fen Systems Ltd.
4 * Copyright 2006-2008 Solarflare Communications Inc.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published
8 * by the Free Software Foundation, incorporated herein by reference.
9 */
10
11#ifndef EFX_SELFTEST_H
12#define EFX_SELFTEST_H
13
14#include "net_driver.h"
15
16/*
17 * Self tests
18 */
19
20struct efx_loopback_self_tests {
21 int tx_sent[EFX_MAX_TX_QUEUES];
22 int tx_done[EFX_MAX_TX_QUEUES];
23 int rx_good;
24 int rx_bad;
25};
26
27/* Efx self test results
28 * For fields which are not counters, 1 indicates success and -1
29 * indicates failure.
30 */
31struct efx_self_tests {
32 int interrupt;
33 int eventq_dma[EFX_MAX_CHANNELS];
34 int eventq_int[EFX_MAX_CHANNELS];
35 int eventq_poll[EFX_MAX_CHANNELS];
36 int phy_ok;
37 int loopback_speed;
38 int loopback_full_duplex;
39 struct efx_loopback_self_tests loopback[LOOPBACK_TEST_MAX];
40};
41
42extern void efx_loopback_rx_packet(struct efx_nic *efx,
43 const char *buf_ptr, int pkt_len);
44extern int efx_online_test(struct efx_nic *efx,
45 struct efx_self_tests *tests);
46extern int efx_offline_test(struct efx_nic *efx,
47 struct efx_self_tests *tests,
48 unsigned int loopback_modes);
49
50#endif /* EFX_SELFTEST_H */
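A caller of these entry points reads the result structure using the convention stated above: for non-counter fields, 1 means pass, -1 means fail, and an untouched 0 means the test did not run. A standalone C sketch of that interpretation (illustrative, not part of the patch; the trimmed struct and the EFX_MAX_CHANNELS value are assumptions for the example):

	#include <stdio.h>

	#define EFX_MAX_CHANNELS 4	/* assumed value for illustration */

	struct efx_self_tests_lite {
		int interrupt;
		int eventq_dma[EFX_MAX_CHANNELS];
		int phy_ok;
	};

	static const char *verdict(int v)
	{
		return v > 0 ? "PASS" : (v < 0 ? "FAIL" : "not run");
	}

	int main(void)
	{
		struct efx_self_tests_lite t = { .interrupt = 1, .phy_ok = -1 };

		printf("interrupt: %s\n", verdict(t.interrupt));
		printf("phy:       %s\n", verdict(t.phy_ok));
		return 0;
	}
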
diff --git a/drivers/net/sfc/sfe4001.c b/drivers/net/sfc/sfe4001.c
index 11fa9fb8f48b..725d1a539c49 100644
--- a/drivers/net/sfc/sfe4001.c
+++ b/drivers/net/sfc/sfe4001.c
@@ -130,6 +130,15 @@ void sfe4001_poweroff(struct efx_nic *efx)
130 (void) efx_i2c_read(i2c, MAX6647, RSL, &in, 1); 130 (void) efx_i2c_read(i2c, MAX6647, RSL, &in, 1);
131} 131}
132 132
133/* The P0_EN_3V3X line on SFE4001 boards (from A2 onward) is connected
134 * to the FLASH_CFG_1 input on the DSP. We must keep it high at power-
135 * up to allow writing the flash (done through MDIO from userland).
136 */
137unsigned int sfe4001_phy_flash_cfg;
138module_param_named(phy_flash_cfg, sfe4001_phy_flash_cfg, uint, 0444);
139MODULE_PARM_DESC(phy_flash_cfg,
140 "Force PHY to enter flash configuration mode");
141
 133/* This board uses an I2C expander to provide power to the PHY, which needs to 142/* This board uses an I2C expander to provide power to the PHY, which needs to
134 * be turned on before the PHY can be used. 143 * be turned on before the PHY can be used.
135 * Context: Process context, rtnl lock held 144 * Context: Process context, rtnl lock held
@@ -203,6 +212,8 @@ int sfe4001_poweron(struct efx_nic *efx)
203 out = 0xff & ~((1 << P0_EN_1V2_LBN) | (1 << P0_EN_2V5_LBN) | 212 out = 0xff & ~((1 << P0_EN_1V2_LBN) | (1 << P0_EN_2V5_LBN) |
204 (1 << P0_EN_3V3X_LBN) | (1 << P0_EN_5V_LBN) | 213 (1 << P0_EN_3V3X_LBN) | (1 << P0_EN_5V_LBN) |
205 (1 << P0_X_TRST_LBN)); 214 (1 << P0_X_TRST_LBN));
215 if (sfe4001_phy_flash_cfg)
216 out |= 1 << P0_EN_3V3X_LBN;
206 217
207 rc = efx_i2c_write(i2c, PCA9539, P0_OUT, &out, 1); 218 rc = efx_i2c_write(i2c, PCA9539, P0_OUT, &out, 1);
208 if (rc) 219 if (rc)
@@ -226,6 +237,9 @@ int sfe4001_poweron(struct efx_nic *efx)
226 if (in & (1 << P1_AFE_PWD_LBN)) 237 if (in & (1 << P1_AFE_PWD_LBN))
227 goto done; 238 goto done;
228 239
240 /* DSP doesn't look powered in flash config mode */
241 if (sfe4001_phy_flash_cfg)
242 goto done;
229 } while (++count < 20); 243 } while (++count < 20);
230 244
231 EFX_INFO(efx, "timed out waiting for power\n"); 245 EFX_INFO(efx, "timed out waiting for power\n");
diff --git a/drivers/net/sfc/tenxpress.c b/drivers/net/sfc/tenxpress.c
index a2e9f79e47b1..b1cd6deec01f 100644
--- a/drivers/net/sfc/tenxpress.c
+++ b/drivers/net/sfc/tenxpress.c
@@ -24,6 +24,11 @@
24 MDIO_MMDREG_DEVS0_PCS | \ 24 MDIO_MMDREG_DEVS0_PCS | \
25 MDIO_MMDREG_DEVS0_PHYXS) 25 MDIO_MMDREG_DEVS0_PHYXS)
26 26
27#define TENXPRESS_LOOPBACKS ((1 << LOOPBACK_PHYXS) | \
28 (1 << LOOPBACK_PCS) | \
29 (1 << LOOPBACK_PMAPMD) | \
30 (1 << LOOPBACK_NETWORK))
31
27/* We complain if we fail to see the link partner as 10G capable this many 32/* We complain if we fail to see the link partner as 10G capable this many
28 * times in a row (must be > 1 as sampling the autoneg. registers is racy) 33 * times in a row (must be > 1 as sampling the autoneg. registers is racy)
29 */ 34 */
@@ -72,6 +77,10 @@
72#define PMA_PMD_BIST_RXD_LBN (1) 77#define PMA_PMD_BIST_RXD_LBN (1)
73#define PMA_PMD_BIST_AFE_LBN (0) 78#define PMA_PMD_BIST_AFE_LBN (0)
74 79
 80/* Special software reset register */
81#define PMA_PMD_EXT_CTRL_REG 49152
82#define PMA_PMD_EXT_SSR_LBN 15
83
75#define BIST_MAX_DELAY (1000) 84#define BIST_MAX_DELAY (1000)
76#define BIST_POLL_DELAY (10) 85#define BIST_POLL_DELAY (10)
77 86
@@ -86,6 +95,11 @@
86#define PCS_TEST_SELECT_REG 0xd807 /* PRM 10.5.8 */ 95#define PCS_TEST_SELECT_REG 0xd807 /* PRM 10.5.8 */
87#define CLK312_EN_LBN 3 96#define CLK312_EN_LBN 3
88 97
98/* PHYXS registers */
99#define PHYXS_TEST1 (49162)
100#define LOOPBACK_NEAR_LBN (8)
101#define LOOPBACK_NEAR_WIDTH (1)
102
89/* Boot status register */ 103/* Boot status register */
90#define PCS_BOOT_STATUS_REG (0xd000) 104#define PCS_BOOT_STATUS_REG (0xd000)
91#define PCS_BOOT_FATAL_ERR_LBN (0) 105#define PCS_BOOT_FATAL_ERR_LBN (0)
@@ -106,7 +120,9 @@ MODULE_PARM_DESC(crc_error_reset_threshold,
106 120
107struct tenxpress_phy_data { 121struct tenxpress_phy_data {
108 enum tenxpress_state state; 122 enum tenxpress_state state;
123 enum efx_loopback_mode loopback_mode;
109 atomic_t bad_crc_count; 124 atomic_t bad_crc_count;
125 int tx_disabled;
110 int bad_lp_tries; 126 int bad_lp_tries;
111}; 127};
112 128
@@ -199,10 +215,12 @@ static int tenxpress_phy_init(struct efx_nic *efx)
199 215
200 tenxpress_set_state(efx, TENXPRESS_STATUS_NORMAL); 216 tenxpress_set_state(efx, TENXPRESS_STATUS_NORMAL);
201 217
202 rc = mdio_clause45_wait_reset_mmds(efx, 218 if (!sfe4001_phy_flash_cfg) {
203 TENXPRESS_REQUIRED_DEVS); 219 rc = mdio_clause45_wait_reset_mmds(efx,
204 if (rc < 0) 220 TENXPRESS_REQUIRED_DEVS);
205 goto fail; 221 if (rc < 0)
222 goto fail;
223 }
206 224
207 rc = mdio_clause45_check_mmds(efx, TENXPRESS_REQUIRED_DEVS, 0); 225 rc = mdio_clause45_check_mmds(efx, TENXPRESS_REQUIRED_DEVS, 0);
208 if (rc < 0) 226 if (rc < 0)
@@ -225,6 +243,35 @@ static int tenxpress_phy_init(struct efx_nic *efx)
225 return rc; 243 return rc;
226} 244}
227 245
246static int tenxpress_special_reset(struct efx_nic *efx)
247{
248 int rc, reg;
249
250 EFX_TRACE(efx, "%s\n", __func__);
251
252 /* Initiate reset */
253 reg = mdio_clause45_read(efx, efx->mii.phy_id,
254 MDIO_MMD_PMAPMD, PMA_PMD_EXT_CTRL_REG);
255 reg |= (1 << PMA_PMD_EXT_SSR_LBN);
256 mdio_clause45_write(efx, efx->mii.phy_id, MDIO_MMD_PMAPMD,
257 PMA_PMD_EXT_CTRL_REG, reg);
258
259 msleep(200);
260
261 /* Wait for the blocks to come out of reset */
262 rc = mdio_clause45_wait_reset_mmds(efx,
263 TENXPRESS_REQUIRED_DEVS);
264 if (rc < 0)
265 return rc;
266
267 /* Try and reconfigure the device */
268 rc = tenxpress_init(efx);
269 if (rc < 0)
270 return rc;
271
272 return 0;
273}
274
228static void tenxpress_set_bad_lp(struct efx_nic *efx, int bad_lp) 275static void tenxpress_set_bad_lp(struct efx_nic *efx, int bad_lp)
229{ 276{
230 struct tenxpress_phy_data *pd = efx->phy_data; 277 struct tenxpress_phy_data *pd = efx->phy_data;
@@ -299,11 +346,46 @@ static int tenxpress_link_ok(struct efx_nic *efx, int check_lp)
299 return ok; 346 return ok;
300} 347}
301 348
349static void tenxpress_phyxs_loopback(struct efx_nic *efx)
350{
351 int phy_id = efx->mii.phy_id;
352 int ctrl1, ctrl2;
353
354 ctrl1 = ctrl2 = mdio_clause45_read(efx, phy_id, MDIO_MMD_PHYXS,
355 PHYXS_TEST1);
356 if (efx->loopback_mode == LOOPBACK_PHYXS)
357 ctrl2 |= (1 << LOOPBACK_NEAR_LBN);
358 else
359 ctrl2 &= ~(1 << LOOPBACK_NEAR_LBN);
360 if (ctrl1 != ctrl2)
361 mdio_clause45_write(efx, phy_id, MDIO_MMD_PHYXS,
362 PHYXS_TEST1, ctrl2);
363}
364
302static void tenxpress_phy_reconfigure(struct efx_nic *efx) 365static void tenxpress_phy_reconfigure(struct efx_nic *efx)
303{ 366{
367 struct tenxpress_phy_data *phy_data = efx->phy_data;
368 int loop_change = LOOPBACK_OUT_OF(phy_data, efx,
369 TENXPRESS_LOOPBACKS);
370
304 if (!tenxpress_state_is(efx, TENXPRESS_STATUS_NORMAL)) 371 if (!tenxpress_state_is(efx, TENXPRESS_STATUS_NORMAL))
305 return; 372 return;
306 373
374 /* When coming out of transmit disable, coming out of low power
375 * mode, or moving out of any PHY internal loopback mode,
376 * perform a special software reset */
377 if ((phy_data->tx_disabled && !efx->tx_disabled) ||
378 loop_change) {
379 (void) tenxpress_special_reset(efx);
380 falcon_reset_xaui(efx);
381 }
382
383 mdio_clause45_transmit_disable(efx);
384 mdio_clause45_phy_reconfigure(efx);
385 tenxpress_phyxs_loopback(efx);
386
387 phy_data->tx_disabled = efx->tx_disabled;
388 phy_data->loopback_mode = efx->loopback_mode;
307 efx->link_up = tenxpress_link_ok(efx, 0); 389 efx->link_up = tenxpress_link_ok(efx, 0);
308 efx->link_options = GM_LPA_10000FULL; 390 efx->link_options = GM_LPA_10000FULL;
309} 391}
@@ -431,4 +513,5 @@ struct efx_phy_operations falcon_tenxpress_phy_ops = {
431 .clear_interrupt = tenxpress_phy_clear_interrupt, 513 .clear_interrupt = tenxpress_phy_clear_interrupt,
432 .reset_xaui = tenxpress_reset_xaui, 514 .reset_xaui = tenxpress_reset_xaui,
433 .mmds = TENXPRESS_REQUIRED_DEVS, 515 .mmds = TENXPRESS_REQUIRED_DEVS,
516 .loopbacks = TENXPRESS_LOOPBACKS,
434}; 517};
diff --git a/drivers/net/sfc/tx.c b/drivers/net/sfc/tx.c
index fbb866b2185e..9b436f5b4888 100644
--- a/drivers/net/sfc/tx.c
+++ b/drivers/net/sfc/tx.c
@@ -82,6 +82,46 @@ static inline void efx_dequeue_buffer(struct efx_tx_queue *tx_queue,
82 } 82 }
83} 83}
84 84
85/**
86 * struct efx_tso_header - a DMA mapped buffer for packet headers
87 * @next: Linked list of free ones.
88 * The list is protected by the TX queue lock.
 89 * @unmap_len: Length to unmap for an oversize buffer, or 0.
90 * @dma_addr: The DMA address of the header below.
91 *
92 * This controls the memory used for a TSO header. Use TSOH_DATA()
93 * to find the packet header data. Use TSOH_SIZE() to calculate the
94 * total size required for a given packet header length. TSO headers
95 * in the free list are exactly %TSOH_STD_SIZE bytes in size.
96 */
97struct efx_tso_header {
98 union {
99 struct efx_tso_header *next;
100 size_t unmap_len;
101 };
102 dma_addr_t dma_addr;
103};
104
105static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
106 const struct sk_buff *skb);
107static void efx_fini_tso(struct efx_tx_queue *tx_queue);
108static void efx_tsoh_heap_free(struct efx_tx_queue *tx_queue,
109 struct efx_tso_header *tsoh);
110
111static inline void efx_tsoh_free(struct efx_tx_queue *tx_queue,
112 struct efx_tx_buffer *buffer)
113{
114 if (buffer->tsoh) {
115 if (likely(!buffer->tsoh->unmap_len)) {
116 buffer->tsoh->next = tx_queue->tso_headers_free;
117 tx_queue->tso_headers_free = buffer->tsoh;
118 } else {
119 efx_tsoh_heap_free(tx_queue, buffer->tsoh);
120 }
121 buffer->tsoh = NULL;
122 }
123}
124
85 125
86/* 126/*
87 * Add a socket buffer to a TX queue 127 * Add a socket buffer to a TX queue
@@ -114,6 +154,9 @@ static inline int efx_enqueue_skb(struct efx_tx_queue *tx_queue,
114 154
115 EFX_BUG_ON_PARANOID(tx_queue->write_count != tx_queue->insert_count); 155 EFX_BUG_ON_PARANOID(tx_queue->write_count != tx_queue->insert_count);
116 156
157 if (skb_shinfo((struct sk_buff *)skb)->gso_size)
158 return efx_enqueue_skb_tso(tx_queue, skb);
159
117 /* Get size of the initial fragment */ 160 /* Get size of the initial fragment */
118 len = skb_headlen(skb); 161 len = skb_headlen(skb);
119 162
@@ -166,6 +209,8 @@ static inline int efx_enqueue_skb(struct efx_tx_queue *tx_queue,
166 insert_ptr = (tx_queue->insert_count & 209 insert_ptr = (tx_queue->insert_count &
167 efx->type->txd_ring_mask); 210 efx->type->txd_ring_mask);
168 buffer = &tx_queue->buffer[insert_ptr]; 211 buffer = &tx_queue->buffer[insert_ptr];
212 efx_tsoh_free(tx_queue, buffer);
213 EFX_BUG_ON_PARANOID(buffer->tsoh);
169 EFX_BUG_ON_PARANOID(buffer->skb); 214 EFX_BUG_ON_PARANOID(buffer->skb);
170 EFX_BUG_ON_PARANOID(buffer->len); 215 EFX_BUG_ON_PARANOID(buffer->len);
171 EFX_BUG_ON_PARANOID(buffer->continuation != 1); 216 EFX_BUG_ON_PARANOID(buffer->continuation != 1);
@@ -432,6 +477,9 @@ void efx_fini_tx_queue(struct efx_tx_queue *tx_queue)
432 477
433 efx_release_tx_buffers(tx_queue); 478 efx_release_tx_buffers(tx_queue);
434 479
480 /* Free up TSO header cache */
481 efx_fini_tso(tx_queue);
482
435 /* Release queue's stop on port, if any */ 483 /* Release queue's stop on port, if any */
436 if (tx_queue->stopped) { 484 if (tx_queue->stopped) {
437 tx_queue->stopped = 0; 485 tx_queue->stopped = 0;
@@ -450,3 +498,619 @@ void efx_remove_tx_queue(struct efx_tx_queue *tx_queue)
450} 498}
451 499
452 500
501/* Efx TCP segmentation acceleration.
502 *
503 * Why? Because by doing it here in the driver we can go significantly
 504 * faster than GSO.
505 *
506 * Requires TX checksum offload support.
507 */
508
509/* Number of bytes inserted at the start of a TSO header buffer,
510 * similar to NET_IP_ALIGN.
511 */
512#if defined(__i386__) || defined(__x86_64__)
513#define TSOH_OFFSET 0
514#else
515#define TSOH_OFFSET NET_IP_ALIGN
516#endif
517
518#define TSOH_BUFFER(tsoh) ((u8 *)(tsoh + 1) + TSOH_OFFSET)
519
520/* Total size of struct efx_tso_header, buffer and padding */
521#define TSOH_SIZE(hdr_len) \
522 (sizeof(struct efx_tso_header) + TSOH_OFFSET + hdr_len)
523
524/* Size of blocks on free list. Larger blocks must be allocated from
525 * the heap.
526 */
527#define TSOH_STD_SIZE 128
528
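The sizing above can be checked by hand: a common 54-byte header (14 Ethernet + 20 IPv4 + 20 TCP, no options) fits comfortably in a standard free-list block, and one page of headers yields PAGE_SIZE / TSOH_STD_SIZE entries. A standalone C sketch (illustrative, not part of the patch; the 16-byte struct size, 4096-byte page, and TSOH_OFFSET of 0 as on x86 are assumptions):

	#include <stdio.h>

	int main(void)
	{
		unsigned tsoh_struct = 16, tsoh_offset = 0, std_size = 128;
		unsigned page = 4096;
		unsigned hdr_len = 14 + 20 + 20;	/* Ethernet + IPv4 + TCP */

		unsigned need = tsoh_struct + tsoh_offset + hdr_len;	/* TSOH_SIZE(54) */
		printf("TSOH_SIZE(%u) = %u -> %s\n", hdr_len, need,
		       need <= std_size ? "free list" : "heap");
		printf("headers per page block: %u\n", page / std_size);	/* 32 */
		return 0;
	}
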
529#define PTR_DIFF(p1, p2) ((u8 *)(p1) - (u8 *)(p2))
530#define ETH_HDR_LEN(skb) (skb_network_header(skb) - (skb)->data)
531#define SKB_TCP_OFF(skb) PTR_DIFF(tcp_hdr(skb), (skb)->data)
532#define SKB_IPV4_OFF(skb) PTR_DIFF(ip_hdr(skb), (skb)->data)
533
534/**
535 * struct tso_state - TSO state for an SKB
536 * @remaining_len: Bytes of data we've yet to segment
537 * @seqnum: Current sequence number
538 * @packet_space: Remaining space in current packet
539 * @ifc: Input fragment cursor.
540 * Where we are in the current fragment of the incoming SKB. These
541 * values get updated in place when we split a fragment over
542 * multiple packets.
543 * @p: Parameters.
544 * These values are set once at the start of the TSO send and do
545 * not get changed as the routine progresses.
546 *
547 * The state used during segmentation. It is put into this data structure
548 * just to make it easy to pass into inline functions.
549 */
550struct tso_state {
551 unsigned remaining_len;
552 unsigned seqnum;
553 unsigned packet_space;
554
555 struct {
556 /* DMA address of current position */
557 dma_addr_t dma_addr;
558 /* Remaining length */
559 unsigned int len;
560 /* DMA address and length of the whole fragment */
561 unsigned int unmap_len;
562 dma_addr_t unmap_addr;
563 struct page *page;
564 unsigned page_off;
565 } ifc;
566
567 struct {
568 /* The number of bytes of header */
569 unsigned int header_length;
570
571 /* The number of bytes to put in each outgoing segment. */
572 int full_packet_size;
573
574 /* Current IPv4 ID, host endian. */
575 unsigned ipv4_id;
576 } p;
577};
578
579
580/*
581 * Verify that our various assumptions about sk_buffs and the conditions
582 * under which TSO will be attempted hold true.
583 */
584static inline void efx_tso_check_safe(const struct sk_buff *skb)
585{
586 EFX_BUG_ON_PARANOID(skb->protocol != htons(ETH_P_IP));
587 EFX_BUG_ON_PARANOID(((struct ethhdr *)skb->data)->h_proto !=
588 skb->protocol);
589 EFX_BUG_ON_PARANOID(ip_hdr(skb)->protocol != IPPROTO_TCP);
590 EFX_BUG_ON_PARANOID((PTR_DIFF(tcp_hdr(skb), skb->data)
591 + (tcp_hdr(skb)->doff << 2u)) >
592 skb_headlen(skb));
593}
594
595
596/*
597 * Allocate a page worth of efx_tso_header structures, and string them
598 * into the tx_queue->tso_headers_free linked list. Return 0 or -ENOMEM.
599 */
600static int efx_tsoh_block_alloc(struct efx_tx_queue *tx_queue)
601{
602
603 struct pci_dev *pci_dev = tx_queue->efx->pci_dev;
604 struct efx_tso_header *tsoh;
605 dma_addr_t dma_addr;
606 u8 *base_kva, *kva;
607
608 base_kva = pci_alloc_consistent(pci_dev, PAGE_SIZE, &dma_addr);
609 if (base_kva == NULL) {
610 EFX_ERR(tx_queue->efx, "Unable to allocate page for TSO"
611 " headers\n");
612 return -ENOMEM;
613 }
614
615 /* pci_alloc_consistent() allocates pages. */
616 EFX_BUG_ON_PARANOID(dma_addr & (PAGE_SIZE - 1u));
617
618 for (kva = base_kva; kva < base_kva + PAGE_SIZE; kva += TSOH_STD_SIZE) {
619 tsoh = (struct efx_tso_header *)kva;
620 tsoh->dma_addr = dma_addr + (TSOH_BUFFER(tsoh) - base_kva);
621 tsoh->next = tx_queue->tso_headers_free;
622 tx_queue->tso_headers_free = tsoh;
623 }
624
625 return 0;
626}
627
628
629/* Free up a TSO header, and all others in the same page. */
630static void efx_tsoh_block_free(struct efx_tx_queue *tx_queue,
631 struct efx_tso_header *tsoh,
632 struct pci_dev *pci_dev)
633{
634 struct efx_tso_header **p;
635 unsigned long base_kva;
636 dma_addr_t base_dma;
637
638 base_kva = (unsigned long)tsoh & PAGE_MASK;
639 base_dma = tsoh->dma_addr & PAGE_MASK;
640
641 p = &tx_queue->tso_headers_free;
642 while (*p != NULL)
643 if (((unsigned long)*p & PAGE_MASK) == base_kva)
644 *p = (*p)->next;
645 else
646 p = &(*p)->next;
647
648 pci_free_consistent(pci_dev, PAGE_SIZE, (void *)base_kva, base_dma);
649}
650
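efx_tsoh_block_free() above unlinks every header on the page using a pointer-to-pointer walk, which needs no special case for the list head and no "previous" pointer. A standalone C sketch of that list-surgery idiom (illustrative, not part of the patch):

	#include <stdio.h>

	struct node { int key; struct node *next; };

	static void remove_key(struct node **head, int key)
	{
		struct node **p = head;

		while (*p != NULL)
			if ((*p)->key == key)
				*p = (*p)->next;	/* unlink in place */
			else
				p = &(*p)->next;
	}

	int main(void)
	{
		struct node c = { 1, NULL }, b = { 2, &c }, a = { 1, &b };
		struct node *head = &a, *n;

		remove_key(&head, 1);
		for (n = head; n; n = n->next)
			printf("%d\n", n->key);	/* prints only 2 */
		return 0;
	}
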
651static struct efx_tso_header *
652efx_tsoh_heap_alloc(struct efx_tx_queue *tx_queue, size_t header_len)
653{
654 struct efx_tso_header *tsoh;
655
656 tsoh = kmalloc(TSOH_SIZE(header_len), GFP_ATOMIC | GFP_DMA);
657 if (unlikely(!tsoh))
658 return NULL;
659
660 tsoh->dma_addr = pci_map_single(tx_queue->efx->pci_dev,
661 TSOH_BUFFER(tsoh), header_len,
662 PCI_DMA_TODEVICE);
663 if (unlikely(pci_dma_mapping_error(tsoh->dma_addr))) {
664 kfree(tsoh);
665 return NULL;
666 }
667
668 tsoh->unmap_len = header_len;
669 return tsoh;
670}
671
672static void
673efx_tsoh_heap_free(struct efx_tx_queue *tx_queue, struct efx_tso_header *tsoh)
674{
675 pci_unmap_single(tx_queue->efx->pci_dev,
676 tsoh->dma_addr, tsoh->unmap_len,
677 PCI_DMA_TODEVICE);
678 kfree(tsoh);
679}
680
681/**
682 * efx_tx_queue_insert - push descriptors onto the TX queue
683 * @tx_queue: Efx TX queue
684 * @dma_addr: DMA address of fragment
685 * @len: Length of fragment
686 * @skb: Only non-null for end of last segment
687 * @end_of_packet: True if last fragment in a packet
688 * @unmap_addr: DMA address of fragment for unmapping
689 * @unmap_len: Only set this in last segment of a fragment
690 *
691 * Push descriptors onto the TX queue. Return 0 on success or 1 if
 692 * @tx_queue is full.
693 */
694static int efx_tx_queue_insert(struct efx_tx_queue *tx_queue,
695 dma_addr_t dma_addr, unsigned len,
696 const struct sk_buff *skb, int end_of_packet,
697 dma_addr_t unmap_addr, unsigned unmap_len)
698{
699 struct efx_tx_buffer *buffer;
700 struct efx_nic *efx = tx_queue->efx;
701 unsigned dma_len, fill_level, insert_ptr, misalign;
702 int q_space;
703
704 EFX_BUG_ON_PARANOID(len <= 0);
705
706 fill_level = tx_queue->insert_count - tx_queue->old_read_count;
707 /* -1 as there is no way to represent all descriptors used */
708 q_space = efx->type->txd_ring_mask - 1 - fill_level;
709
710 while (1) {
711 if (unlikely(q_space-- <= 0)) {
712 /* It might be that completions have happened
713 * since the xmit path last checked. Update
714 * the xmit path's copy of read_count.
715 */
716 ++tx_queue->stopped;
717 /* This memory barrier protects the change of
718 * stopped from the access of read_count. */
719 smp_mb();
720 tx_queue->old_read_count =
721 *(volatile unsigned *)&tx_queue->read_count;
722 fill_level = (tx_queue->insert_count
723 - tx_queue->old_read_count);
724 q_space = efx->type->txd_ring_mask - 1 - fill_level;
725 if (unlikely(q_space-- <= 0))
726 return 1;
727 smp_mb();
728 --tx_queue->stopped;
729 }
730
731 insert_ptr = tx_queue->insert_count & efx->type->txd_ring_mask;
732 buffer = &tx_queue->buffer[insert_ptr];
733 ++tx_queue->insert_count;
734
735 EFX_BUG_ON_PARANOID(tx_queue->insert_count -
736 tx_queue->read_count >
737 efx->type->txd_ring_mask);
738
739 efx_tsoh_free(tx_queue, buffer);
740 EFX_BUG_ON_PARANOID(buffer->len);
741 EFX_BUG_ON_PARANOID(buffer->unmap_len);
742 EFX_BUG_ON_PARANOID(buffer->skb);
743 EFX_BUG_ON_PARANOID(buffer->continuation != 1);
744 EFX_BUG_ON_PARANOID(buffer->tsoh);
745
746 buffer->dma_addr = dma_addr;
747
748 /* Ensure we do not cross a boundary unsupported by H/W */
749 dma_len = (~dma_addr & efx->type->tx_dma_mask) + 1;
750
751 misalign = (unsigned)dma_addr & efx->type->bug5391_mask;
752 if (misalign && dma_len + misalign > 512)
753 dma_len = 512 - misalign;
754
755 /* If there is enough space to send then do so */
756 if (dma_len >= len)
757 break;
758
759 buffer->len = dma_len; /* Don't set the other members */
760 dma_addr += dma_len;
761 len -= dma_len;
762 }
763
764 EFX_BUG_ON_PARANOID(!len);
765 buffer->len = len;
766 buffer->skb = skb;
767 buffer->continuation = !end_of_packet;
768 buffer->unmap_addr = unmap_addr;
769 buffer->unmap_len = unmap_len;
770 return 0;
771}
772
773
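The boundary handling in efx_tx_queue_insert() above relies on (~dma_addr & mask) + 1 being the number of bytes from dma_addr up to the next hardware boundary. A standalone C sketch of the splitting loop (illustrative, not part of the patch; the 4KB mask is an assumed stand-in for tx_dma_mask):

	#include <stdio.h>

	int main(void)
	{
		unsigned long long dma_addr = 0x12345f80ULL;
		unsigned long long mask = 0xfffULL;	/* 4KB boundary (assumed) */
		unsigned len = 0x200;

		while (len) {
			unsigned dma_len = (unsigned)((~dma_addr & mask) + 1);
			if (dma_len > len)
				dma_len = len;	/* whole remainder fits */
			printf("descriptor: addr=0x%llx len=0x%x\n", dma_addr, dma_len);
			dma_addr += dma_len;
			len -= dma_len;
		}
		return 0;
	}
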
774/*
775 * Put a TSO header into the TX queue.
776 *
777 * This is special-cased because we know that it is small enough to fit in
778 * a single fragment, and we know it doesn't cross a page boundary. It
779 * also allows us to not worry about end-of-packet etc.
780 */
781static inline void efx_tso_put_header(struct efx_tx_queue *tx_queue,
782 struct efx_tso_header *tsoh, unsigned len)
783{
784 struct efx_tx_buffer *buffer;
785
786 buffer = &tx_queue->buffer[tx_queue->insert_count &
787 tx_queue->efx->type->txd_ring_mask];
788 efx_tsoh_free(tx_queue, buffer);
789 EFX_BUG_ON_PARANOID(buffer->len);
790 EFX_BUG_ON_PARANOID(buffer->unmap_len);
791 EFX_BUG_ON_PARANOID(buffer->skb);
792 EFX_BUG_ON_PARANOID(buffer->continuation != 1);
793 EFX_BUG_ON_PARANOID(buffer->tsoh);
794 buffer->len = len;
795 buffer->dma_addr = tsoh->dma_addr;
796 buffer->tsoh = tsoh;
797
798 ++tx_queue->insert_count;
799}
800
801
802/* Remove descriptors put into a tx_queue. */
803static void efx_enqueue_unwind(struct efx_tx_queue *tx_queue)
804{
805 struct efx_tx_buffer *buffer;
806
807 /* Work backwards until we hit the original insert pointer value */
808 while (tx_queue->insert_count != tx_queue->write_count) {
809 --tx_queue->insert_count;
810 buffer = &tx_queue->buffer[tx_queue->insert_count &
811 tx_queue->efx->type->txd_ring_mask];
812 efx_tsoh_free(tx_queue, buffer);
813 EFX_BUG_ON_PARANOID(buffer->skb);
814 buffer->len = 0;
815 buffer->continuation = 1;
816 if (buffer->unmap_len) {
817 pci_unmap_page(tx_queue->efx->pci_dev,
818 buffer->unmap_addr,
819 buffer->unmap_len, PCI_DMA_TODEVICE);
820 buffer->unmap_len = 0;
821 }
822 }
823}
824
825
826/* Parse the SKB header and initialise state. */
827static inline void tso_start(struct tso_state *st, const struct sk_buff *skb)
828{
 829 /* The combined size of all Ethernet/IP/TCP headers is the TCP header
 830 * size plus the offset of the TCP header from the start of the packet.
831 */
832 st->p.header_length = ((tcp_hdr(skb)->doff << 2u)
833 + PTR_DIFF(tcp_hdr(skb), skb->data));
834 st->p.full_packet_size = (st->p.header_length
835 + skb_shinfo(skb)->gso_size);
836
837 st->p.ipv4_id = ntohs(ip_hdr(skb)->id);
838 st->seqnum = ntohl(tcp_hdr(skb)->seq);
839
840 EFX_BUG_ON_PARANOID(tcp_hdr(skb)->urg);
841 EFX_BUG_ON_PARANOID(tcp_hdr(skb)->syn);
842 EFX_BUG_ON_PARANOID(tcp_hdr(skb)->rst);
843
844 st->packet_space = st->p.full_packet_size;
845 st->remaining_len = skb->len - st->p.header_length;
846}
847
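Worked through for a typical frame, the tso_start() arithmetic above gives a 54-byte header and a full-size 1514-byte segment. A standalone C sketch (illustrative, not part of the patch; the doff of 5 and MSS of 1460 are assumed example values):

	#include <stdio.h>

	int main(void)
	{
		unsigned tcp_off = 14 + 20;	/* TCP header offset in the packet */
		unsigned doff = 5;		/* TCP data offset, in 32-bit words */
		unsigned gso_size = 1460;	/* MSS */

		unsigned header_length = (doff << 2) + tcp_off;		/* 54 */
		unsigned full_packet_size = header_length + gso_size;	/* 1514 */
		printf("header %u bytes, segment %u bytes on the wire\n",
		       header_length, full_packet_size);
		return 0;
	}
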
848
849/**
850 * tso_get_fragment - record fragment details and map for DMA
851 * @st: TSO state
852 * @efx: Efx NIC
853 * @data: Pointer to fragment data
854 * @len: Length of fragment
855 *
856 * Record fragment details and map for DMA. Return 0 on success, or
857 * -%ENOMEM if DMA mapping fails.
858 */
859static inline int tso_get_fragment(struct tso_state *st, struct efx_nic *efx,
860 int len, struct page *page, int page_off)
861{
862
863 st->ifc.unmap_addr = pci_map_page(efx->pci_dev, page, page_off,
864 len, PCI_DMA_TODEVICE);
865 if (likely(!pci_dma_mapping_error(st->ifc.unmap_addr))) {
866 st->ifc.unmap_len = len;
867 st->ifc.len = len;
868 st->ifc.dma_addr = st->ifc.unmap_addr;
869 st->ifc.page = page;
870 st->ifc.page_off = page_off;
871 return 0;
872 }
873 return -ENOMEM;
874}
875
876
877/**
878 * tso_fill_packet_with_fragment - form descriptors for the current fragment
879 * @tx_queue: Efx TX queue
880 * @skb: Socket buffer
881 * @st: TSO state
882 *
883 * Form descriptors for the current fragment, until we reach the end
884 * of fragment or end-of-packet. Return 0 on success, 1 if not enough
885 * space in @tx_queue.
886 */
887static inline int tso_fill_packet_with_fragment(struct efx_tx_queue *tx_queue,
888 const struct sk_buff *skb,
889 struct tso_state *st)
890{
891
892 int n, end_of_packet, rc;
893
894 if (st->ifc.len == 0)
895 return 0;
896 if (st->packet_space == 0)
897 return 0;
898
899 EFX_BUG_ON_PARANOID(st->ifc.len <= 0);
900 EFX_BUG_ON_PARANOID(st->packet_space <= 0);
901
902 n = min(st->ifc.len, st->packet_space);
903
904 st->packet_space -= n;
905 st->remaining_len -= n;
906 st->ifc.len -= n;
907 st->ifc.page_off += n;
908 end_of_packet = st->remaining_len == 0 || st->packet_space == 0;
909
910 rc = efx_tx_queue_insert(tx_queue, st->ifc.dma_addr, n,
911 st->remaining_len ? NULL : skb,
912 end_of_packet, st->ifc.unmap_addr,
913 st->ifc.len ? 0 : st->ifc.unmap_len);
914
915 st->ifc.dma_addr += n;
916
917 return rc;
918}
919
920
/**
 * tso_start_new_packet - generate a new header and prepare for the new packet
 * @tx_queue: Efx TX queue
 * @skb: Socket buffer
 * @st: TSO state
 *
 * Generate a new header and prepare for the new packet. Return 0 on
 * success, or -1 if we failed to allocate the header.
 */
static inline int tso_start_new_packet(struct efx_tx_queue *tx_queue,
				       const struct sk_buff *skb,
				       struct tso_state *st)
{
	struct efx_tso_header *tsoh;
	struct iphdr *tsoh_iph;
	struct tcphdr *tsoh_th;
	unsigned ip_length;
	u8 *header;

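	/* Headers that fit TSOH_STD_SIZE come from the per-queue free
	 * list; oversized headers are expected to be rare and are
	 * allocated one-off.
	 */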
	/* Allocate a DMA-mapped header buffer. */
	if (likely(TSOH_SIZE(st->p.header_length) <= TSOH_STD_SIZE)) {
		if (tx_queue->tso_headers_free == NULL)
			if (efx_tsoh_block_alloc(tx_queue))
				return -1;
		EFX_BUG_ON_PARANOID(!tx_queue->tso_headers_free);
		tsoh = tx_queue->tso_headers_free;
		tx_queue->tso_headers_free = tsoh->next;
		tsoh->unmap_len = 0;
	} else {
		tx_queue->tso_long_headers++;
		tsoh = efx_tsoh_heap_alloc(tx_queue, st->p.header_length);
		if (unlikely(!tsoh))
			return -1;
	}

	header = TSOH_BUFFER(tsoh);
	tsoh_th = (struct tcphdr *)(header + SKB_TCP_OFF(skb));
	tsoh_iph = (struct iphdr *)(header + SKB_IPV4_OFF(skb));

	/* Copy and update the headers. */
	memcpy(header, skb->data, st->p.header_length);

	tsoh_th->seq = htonl(st->seqnum);
	st->seqnum += skb_shinfo(skb)->gso_size;
	if (st->remaining_len > skb_shinfo(skb)->gso_size) {
		/* This packet will not finish the TSO burst. */
		ip_length = st->p.full_packet_size - ETH_HDR_LEN(skb);
		tsoh_th->fin = 0;
		tsoh_th->psh = 0;
	} else {
		/* This packet will be the last in the TSO burst. */
		ip_length = (st->p.header_length - ETH_HDR_LEN(skb)
			     + st->remaining_len);
		tsoh_th->fin = tcp_hdr(skb)->fin;
		tsoh_th->psh = tcp_hdr(skb)->psh;
	}
	tsoh_iph->tot_len = htons(ip_length);

	/* Linux leaves suitable gaps in the IP ID space for us to fill. */
	tsoh_iph->id = htons(st->p.ipv4_id);
	st->p.ipv4_id++;

	st->packet_space = skb_shinfo(skb)->gso_size;
	++tx_queue->tso_packets;

	/* Form a descriptor for this header. */
	efx_tso_put_header(tx_queue, tsoh, st->p.header_length);

	return 0;
}


/**
 * efx_enqueue_skb_tso - segment and transmit a TSO socket buffer
 * @tx_queue: Efx TX queue
 * @skb: Socket buffer
 *
 * Context: You must hold netif_tx_lock() to call this function.
 *
 * Add socket buffer @skb to @tx_queue, performing TSO, and return a
 * non-zero value if @skb was not enqueued. In all cases @skb is
 * consumed. Return %NETDEV_TX_OK or %NETDEV_TX_BUSY.
 */
static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
			       const struct sk_buff *skb)
{
	int frag_i, rc, rc2 = NETDEV_TX_OK;
	struct tso_state state;
	skb_frag_t *f;

	/* Verify TSO is safe - these checks should never fail. */
	efx_tso_check_safe(skb);

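	/* A clean queue (write_count == insert_count) lets us unwind
	 * everything we add here if allocation or mapping fails.
	 */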
	EFX_BUG_ON_PARANOID(tx_queue->write_count != tx_queue->insert_count);

	tso_start(&state, skb);

	/* Assume that skb header area contains exactly the headers, and
	 * all payload is in the frag list.
	 */
	if (skb_headlen(skb) == state.p.header_length) {
		/* Grab the first payload fragment. */
		EFX_BUG_ON_PARANOID(skb_shinfo(skb)->nr_frags < 1);
		frag_i = 0;
		f = &skb_shinfo(skb)->frags[frag_i];
		rc = tso_get_fragment(&state, tx_queue->efx,
				      f->size, f->page, f->page_offset);
		if (rc)
			goto mem_err;
	} else {
		/* It may look like this code fragment assumes that the
		 * skb->data portion does not cross a page boundary, but
		 * that is not the case. It is guaranteed to be direct
		 * mapped memory, and therefore is physically contiguous,
		 * and so DMA will work fine. kmap_atomic() on this region
		 * will just return the direct mapping, so that will work
		 * too.
		 */
		int page_off = (unsigned long)skb->data & (PAGE_SIZE - 1);
		int hl = state.p.header_length;
		rc = tso_get_fragment(&state, tx_queue->efx,
				      skb_headlen(skb) - hl,
				      virt_to_page(skb->data), page_off + hl);
		if (rc)
			goto mem_err;
		frag_i = -1;
	}

	if (tso_start_new_packet(tx_queue, skb, &state) < 0)
		goto mem_err;

	while (1) {
		rc = tso_fill_packet_with_fragment(tx_queue, skb, &state);
		if (unlikely(rc))
			goto stop;

		/* Move onto the next fragment? */
		if (state.ifc.len == 0) {
			if (++frag_i >= skb_shinfo(skb)->nr_frags)
				/* End of payload reached. */
				break;
			f = &skb_shinfo(skb)->frags[frag_i];
			rc = tso_get_fragment(&state, tx_queue->efx,
					      f->size, f->page, f->page_offset);
			if (rc)
				goto mem_err;
		}

		/* Start at new packet? */
		if (state.packet_space == 0 &&
		    tso_start_new_packet(tx_queue, skb, &state) < 0)
			goto mem_err;
	}

	/* Pass off to hardware */
	falcon_push_buffers(tx_queue);

	tx_queue->tso_bursts++;
	return NETDEV_TX_OK;

 mem_err:
	EFX_ERR(tx_queue->efx, "Out of memory for TSO headers, or PCI mapping"
		" error\n");
	dev_kfree_skb_any((struct sk_buff *)skb);
	goto unwind;

 stop:
	rc2 = NETDEV_TX_BUSY;
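	/* The skb has not been freed; returning NETDEV_TX_BUSY makes the
	 * stack requeue it once the queue has been restarted.
	 */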

	/* Stop the queue if it wasn't stopped before. */
	if (tx_queue->stopped == 1)
		efx_stop_queue(tx_queue->efx);

 unwind:
	efx_enqueue_unwind(tx_queue);
	return rc2;
}


/*
 * Free up all TSO data structures associated with tx_queue. This
 * routine should be called only once the tx_queue is both empty and
 * will no longer be used.
 */
static void efx_fini_tso(struct efx_tx_queue *tx_queue)
{
	unsigned i;

	if (tx_queue->buffer)
		for (i = 0; i <= tx_queue->efx->type->txd_ring_mask; ++i)
			efx_tsoh_free(tx_queue, &tx_queue->buffer[i]);

	while (tx_queue->tso_headers_free != NULL)
		efx_tsoh_block_free(tx_queue, tx_queue->tso_headers_free,
				    tx_queue->efx->pci_dev);
}
diff --git a/drivers/net/sfc/xfp_phy.c b/drivers/net/sfc/xfp_phy.c
index 66dd5bf1eaa9..3b9f9ddbc372 100644
--- a/drivers/net/sfc/xfp_phy.c
+++ b/drivers/net/sfc/xfp_phy.c
@@ -24,6 +24,10 @@
 	MDIO_MMDREG_DEVS0_PMAPMD | \
 	MDIO_MMDREG_DEVS0_PHYXS)
 
+#define XFP_LOOPBACKS ((1 << LOOPBACK_PCS) |	\
+		       (1 << LOOPBACK_PMAPMD) |	\
+		       (1 << LOOPBACK_NETWORK))
+
 /****************************************************************************/
 /* Quake-specific MDIO registers */
 #define MDIO_QUAKE_LED0_REG	(0xD006)
@@ -35,6 +39,10 @@ void xfp_set_led(struct efx_nic *p, int led, int mode)
 		    mode);
 }
 
+struct xfp_phy_data {
+	int tx_disabled;
+};
+
 #define XFP_MAX_RESET_TIME 500
 #define XFP_RESET_WAIT 10
 
@@ -72,18 +80,31 @@ static int xfp_reset_phy(struct efx_nic *efx)
 
 static int xfp_phy_init(struct efx_nic *efx)
 {
+	struct xfp_phy_data *phy_data;
 	u32 devid = mdio_clause45_read_id(efx, MDIO_MMD_PHYXS);
 	int rc;
 
+	phy_data = kzalloc(sizeof(struct xfp_phy_data), GFP_KERNEL);
+	efx->phy_data = (void *) phy_data;
+
 	EFX_INFO(efx, "XFP: PHY ID reg %x (OUI %x model %x revision"
 		 " %x)\n", devid, MDIO_ID_OUI(devid), MDIO_ID_MODEL(devid),
 		 MDIO_ID_REV(devid));
 
+	phy_data->tx_disabled = efx->tx_disabled;
+
 	rc = xfp_reset_phy(efx);
 
 	EFX_INFO(efx, "XFP: PHY init %s.\n",
 		 rc ? "failed" : "successful");
+	if (rc < 0)
+		goto fail;
 
+	return 0;
+
+ fail:
+	kfree(efx->phy_data);
+	efx->phy_data = NULL;
 	return rc;
 }
 
@@ -110,6 +131,16 @@ static int xfp_phy_check_hw(struct efx_nic *efx)
 
 static void xfp_phy_reconfigure(struct efx_nic *efx)
 {
+	struct xfp_phy_data *phy_data = efx->phy_data;
+
+	/* Reset the PHY when moving from tx off to tx on */
+	if (phy_data->tx_disabled && !efx->tx_disabled)
+		xfp_reset_phy(efx);
+
+	mdio_clause45_transmit_disable(efx);
+	mdio_clause45_phy_reconfigure(efx);
+
+	phy_data->tx_disabled = efx->tx_disabled;
 	efx->link_up = xfp_link_ok(efx);
 	efx->link_options = GM_LPA_10000FULL;
 }
@@ -119,6 +150,10 @@ static void xfp_phy_fini(struct efx_nic *efx)
 {
 	/* Clobber the LED if it was blinking */
 	efx->board_info.blink(efx, 0);
+
+	/* Free the context block */
+	kfree(efx->phy_data);
+	efx->phy_data = NULL;
 }
 
 struct efx_phy_operations falcon_xfp_phy_ops = {
@@ -129,4 +164,5 @@ struct efx_phy_operations falcon_xfp_phy_ops = {
 	.clear_interrupt = xfp_phy_clear_interrupt,
 	.reset_xaui	 = efx_port_dummy_op_void,
 	.mmds		 = XFP_REQUIRED_DEVS,
+	.loopbacks	 = XFP_LOOPBACKS,
 };
diff --git a/drivers/net/sky2.h b/drivers/net/sky2.h
index 7bb3ba9bcbd8..c0a5eea20007 100644
--- a/drivers/net/sky2.h
+++ b/drivers/net/sky2.h
@@ -1966,13 +1966,13 @@ struct sky2_status_le {
 struct tx_ring_info {
 	struct sk_buff	*skb;
 	DECLARE_PCI_UNMAP_ADDR(mapaddr);
-	DECLARE_PCI_UNMAP_ADDR(maplen);
+	DECLARE_PCI_UNMAP_LEN(maplen);
 };
 
 struct rx_ring_info {
 	struct sk_buff	*skb;
 	dma_addr_t	data_addr;
-	DECLARE_PCI_UNMAP_ADDR(data_size);
+	DECLARE_PCI_UNMAP_LEN(data_size);
 	dma_addr_t	frag_addr[ETH_JUMBO_MTU >> PAGE_SHIFT];
 };
 
diff --git a/drivers/net/tulip/uli526x.c b/drivers/net/tulip/uli526x.c
index a59c1f224aa8..2511ca7a12aa 100644
--- a/drivers/net/tulip/uli526x.c
+++ b/drivers/net/tulip/uli526x.c
@@ -434,10 +434,6 @@ static int uli526x_open(struct net_device *dev)
 
 	ULI526X_DBUG(0, "uli526x_open", 0);
 
-	ret = request_irq(dev->irq, &uli526x_interrupt, IRQF_SHARED, dev->name, dev);
-	if (ret)
-		return ret;
-
 	/* system variable init */
 	db->cr6_data = CR6_DEFAULT | uli526x_cr6_user_set;
 	db->tx_packet_cnt = 0;
@@ -456,6 +452,10 @@ static int uli526x_open(struct net_device *dev)
 	/* Initialize ULI526X board */
 	uli526x_init(dev);
 
+	ret = request_irq(dev->irq, &uli526x_interrupt, IRQF_SHARED, dev->name, dev);
+	if (ret)
+		return ret;
+
 	/* Active System Interface */
 	netif_wake_queue(dev);
 
@@ -1368,6 +1368,12 @@ static void update_cr6(u32 cr6_data, unsigned long ioaddr)
  * This setup frame initialize ULI526X address filter mode
  */
 
+#ifdef __BIG_ENDIAN
+#define FLT_SHIFT 16
+#else
+#define FLT_SHIFT 0
+#endif
+
 static void send_filter_frame(struct net_device *dev, int mc_cnt)
 {
 	struct uli526x_board_info *db = netdev_priv(dev);
@@ -1384,27 +1390,27 @@ static void send_filter_frame(struct net_device *dev, int mc_cnt)
 
 	/* Node address */
 	addrptr = (u16 *) dev->dev_addr;
-	*suptr++ = addrptr[0];
-	*suptr++ = addrptr[1];
-	*suptr++ = addrptr[2];
+	*suptr++ = addrptr[0] << FLT_SHIFT;
+	*suptr++ = addrptr[1] << FLT_SHIFT;
+	*suptr++ = addrptr[2] << FLT_SHIFT;
 
 	/* broadcast address */
-	*suptr++ = 0xffff;
-	*suptr++ = 0xffff;
-	*suptr++ = 0xffff;
+	*suptr++ = 0xffff << FLT_SHIFT;
+	*suptr++ = 0xffff << FLT_SHIFT;
+	*suptr++ = 0xffff << FLT_SHIFT;
 
 	/* fit the multicast address */
 	for (mcptr = dev->mc_list, i = 0; i < mc_cnt; i++, mcptr = mcptr->next) {
 		addrptr = (u16 *) mcptr->dmi_addr;
-		*suptr++ = addrptr[0];
-		*suptr++ = addrptr[1];
-		*suptr++ = addrptr[2];
+		*suptr++ = addrptr[0] << FLT_SHIFT;
+		*suptr++ = addrptr[1] << FLT_SHIFT;
+		*suptr++ = addrptr[2] << FLT_SHIFT;
 	}
 
 	for (; i<14; i++) {
-		*suptr++ = 0xffff;
-		*suptr++ = 0xffff;
-		*suptr++ = 0xffff;
+		*suptr++ = 0xffff << FLT_SHIFT;
+		*suptr++ = 0xffff << FLT_SHIFT;
+		*suptr++ = 0xffff << FLT_SHIFT;
 	}
 
 	/* prepare the setup frame */
diff --git a/drivers/net/ucc_geth.c b/drivers/net/ucc_geth.c
index 281ce3d39532..ca0bdac07a78 100644
--- a/drivers/net/ucc_geth.c
+++ b/drivers/net/ucc_geth.c
@@ -62,7 +62,6 @@
62#endif /* UGETH_VERBOSE_DEBUG */ 62#endif /* UGETH_VERBOSE_DEBUG */
63#define UGETH_MSG_DEFAULT (NETIF_MSG_IFUP << 1 ) - 1 63#define UGETH_MSG_DEFAULT (NETIF_MSG_IFUP << 1 ) - 1
64 64
65void uec_set_ethtool_ops(struct net_device *netdev);
66 65
67static DEFINE_SPINLOCK(ugeth_lock); 66static DEFINE_SPINLOCK(ugeth_lock);
68 67
@@ -216,7 +215,8 @@ static struct list_head *dequeue(struct list_head *lh)
216 } 215 }
217} 216}
218 217
219static struct sk_buff *get_new_skb(struct ucc_geth_private *ugeth, u8 *bd) 218static struct sk_buff *get_new_skb(struct ucc_geth_private *ugeth,
219 u8 __iomem *bd)
220{ 220{
221 struct sk_buff *skb = NULL; 221 struct sk_buff *skb = NULL;
222 222
@@ -236,21 +236,22 @@ static struct sk_buff *get_new_skb(struct ucc_geth_private *ugeth, u8 *bd)
236 236
237 skb->dev = ugeth->dev; 237 skb->dev = ugeth->dev;
238 238
239 out_be32(&((struct qe_bd *)bd)->buf, 239 out_be32(&((struct qe_bd __iomem *)bd)->buf,
240 dma_map_single(NULL, 240 dma_map_single(NULL,
241 skb->data, 241 skb->data,
242 ugeth->ug_info->uf_info.max_rx_buf_length + 242 ugeth->ug_info->uf_info.max_rx_buf_length +
243 UCC_GETH_RX_DATA_BUF_ALIGNMENT, 243 UCC_GETH_RX_DATA_BUF_ALIGNMENT,
244 DMA_FROM_DEVICE)); 244 DMA_FROM_DEVICE));
245 245
246 out_be32((u32 *)bd, (R_E | R_I | (in_be32((u32 *)bd) & R_W))); 246 out_be32((u32 __iomem *)bd,
247 (R_E | R_I | (in_be32((u32 __iomem*)bd) & R_W)));
247 248
248 return skb; 249 return skb;
249} 250}
250 251
251static int rx_bd_buffer_set(struct ucc_geth_private *ugeth, u8 rxQ) 252static int rx_bd_buffer_set(struct ucc_geth_private *ugeth, u8 rxQ)
252{ 253{
253 u8 *bd; 254 u8 __iomem *bd;
254 u32 bd_status; 255 u32 bd_status;
255 struct sk_buff *skb; 256 struct sk_buff *skb;
256 int i; 257 int i;
@@ -259,7 +260,7 @@ static int rx_bd_buffer_set(struct ucc_geth_private *ugeth, u8 rxQ)
259 i = 0; 260 i = 0;
260 261
261 do { 262 do {
262 bd_status = in_be32((u32*)bd); 263 bd_status = in_be32((u32 __iomem *)bd);
263 skb = get_new_skb(ugeth, bd); 264 skb = get_new_skb(ugeth, bd);
264 265
265 if (!skb) /* If can not allocate data buffer, 266 if (!skb) /* If can not allocate data buffer,
@@ -277,7 +278,7 @@ static int rx_bd_buffer_set(struct ucc_geth_private *ugeth, u8 rxQ)
277} 278}
278 279
279static int fill_init_enet_entries(struct ucc_geth_private *ugeth, 280static int fill_init_enet_entries(struct ucc_geth_private *ugeth,
280 volatile u32 *p_start, 281 u32 *p_start,
281 u8 num_entries, 282 u8 num_entries,
282 u32 thread_size, 283 u32 thread_size,
283 u32 thread_alignment, 284 u32 thread_alignment,
@@ -316,7 +317,7 @@ static int fill_init_enet_entries(struct ucc_geth_private *ugeth,
316} 317}
317 318
318static int return_init_enet_entries(struct ucc_geth_private *ugeth, 319static int return_init_enet_entries(struct ucc_geth_private *ugeth,
319 volatile u32 *p_start, 320 u32 *p_start,
320 u8 num_entries, 321 u8 num_entries,
321 enum qe_risc_allocation risc, 322 enum qe_risc_allocation risc,
322 int skip_page_for_first_entry) 323 int skip_page_for_first_entry)
@@ -326,21 +327,22 @@ static int return_init_enet_entries(struct ucc_geth_private *ugeth,
326 int snum; 327 int snum;
327 328
328 for (i = 0; i < num_entries; i++) { 329 for (i = 0; i < num_entries; i++) {
330 u32 val = *p_start;
331
329 /* Check that this entry was actually valid -- 332 /* Check that this entry was actually valid --
330 needed in case failed in allocations */ 333 needed in case failed in allocations */
331 if ((*p_start & ENET_INIT_PARAM_RISC_MASK) == risc) { 334 if ((val & ENET_INIT_PARAM_RISC_MASK) == risc) {
332 snum = 335 snum =
333 (u32) (*p_start & ENET_INIT_PARAM_SNUM_MASK) >> 336 (u32) (val & ENET_INIT_PARAM_SNUM_MASK) >>
334 ENET_INIT_PARAM_SNUM_SHIFT; 337 ENET_INIT_PARAM_SNUM_SHIFT;
335 qe_put_snum((u8) snum); 338 qe_put_snum((u8) snum);
336 if (!((i == 0) && skip_page_for_first_entry)) { 339 if (!((i == 0) && skip_page_for_first_entry)) {
337 /* First entry of Rx does not have page */ 340 /* First entry of Rx does not have page */
338 init_enet_offset = 341 init_enet_offset =
339 (in_be32(p_start) & 342 (val & ENET_INIT_PARAM_PTR_MASK);
340 ENET_INIT_PARAM_PTR_MASK);
341 qe_muram_free(init_enet_offset); 343 qe_muram_free(init_enet_offset);
342 } 344 }
343 *(p_start++) = 0; /* Just for cosmetics */ 345 *p_start++ = 0;
344 } 346 }
345 } 347 }
346 348
@@ -349,7 +351,7 @@ static int return_init_enet_entries(struct ucc_geth_private *ugeth,
349 351
350#ifdef DEBUG 352#ifdef DEBUG
351static int dump_init_enet_entries(struct ucc_geth_private *ugeth, 353static int dump_init_enet_entries(struct ucc_geth_private *ugeth,
352 volatile u32 *p_start, 354 u32 __iomem *p_start,
353 u8 num_entries, 355 u8 num_entries,
354 u32 thread_size, 356 u32 thread_size,
355 enum qe_risc_allocation risc, 357 enum qe_risc_allocation risc,
@@ -360,11 +362,13 @@ static int dump_init_enet_entries(struct ucc_geth_private *ugeth,
360 int snum; 362 int snum;
361 363
362 for (i = 0; i < num_entries; i++) { 364 for (i = 0; i < num_entries; i++) {
365 u32 val = in_be32(p_start);
366
363 /* Check that this entry was actually valid -- 367 /* Check that this entry was actually valid --
364 needed in case failed in allocations */ 368 needed in case failed in allocations */
365 if ((*p_start & ENET_INIT_PARAM_RISC_MASK) == risc) { 369 if ((val & ENET_INIT_PARAM_RISC_MASK) == risc) {
366 snum = 370 snum =
367 (u32) (*p_start & ENET_INIT_PARAM_SNUM_MASK) >> 371 (u32) (val & ENET_INIT_PARAM_SNUM_MASK) >>
368 ENET_INIT_PARAM_SNUM_SHIFT; 372 ENET_INIT_PARAM_SNUM_SHIFT;
369 qe_put_snum((u8) snum); 373 qe_put_snum((u8) snum);
370 if (!((i == 0) && skip_page_for_first_entry)) { 374 if (!((i == 0) && skip_page_for_first_entry)) {
@@ -440,7 +444,7 @@ static int hw_add_addr_in_paddr(struct ucc_geth_private *ugeth,
440 444
441static int hw_clear_addr_in_paddr(struct ucc_geth_private *ugeth, u8 paddr_num) 445static int hw_clear_addr_in_paddr(struct ucc_geth_private *ugeth, u8 paddr_num)
442{ 446{
443 struct ucc_geth_82xx_address_filtering_pram *p_82xx_addr_filt; 447 struct ucc_geth_82xx_address_filtering_pram __iomem *p_82xx_addr_filt;
444 448
445 if (!(paddr_num < NUM_OF_PADDRS)) { 449 if (!(paddr_num < NUM_OF_PADDRS)) {
446 ugeth_warn("%s: Illagel paddr_num.", __FUNCTION__); 450 ugeth_warn("%s: Illagel paddr_num.", __FUNCTION__);
@@ -448,7 +452,7 @@ static int hw_clear_addr_in_paddr(struct ucc_geth_private *ugeth, u8 paddr_num)
448 } 452 }
449 453
450 p_82xx_addr_filt = 454 p_82xx_addr_filt =
451 (struct ucc_geth_82xx_address_filtering_pram *) ugeth->p_rx_glbl_pram-> 455 (struct ucc_geth_82xx_address_filtering_pram __iomem *) ugeth->p_rx_glbl_pram->
452 addressfiltering; 456 addressfiltering;
453 457
454 /* Writing address ff.ff.ff.ff.ff.ff disables address 458 /* Writing address ff.ff.ff.ff.ff.ff disables address
@@ -463,11 +467,11 @@ static int hw_clear_addr_in_paddr(struct ucc_geth_private *ugeth, u8 paddr_num)
463static void hw_add_addr_in_hash(struct ucc_geth_private *ugeth, 467static void hw_add_addr_in_hash(struct ucc_geth_private *ugeth,
464 u8 *p_enet_addr) 468 u8 *p_enet_addr)
465{ 469{
466 struct ucc_geth_82xx_address_filtering_pram *p_82xx_addr_filt; 470 struct ucc_geth_82xx_address_filtering_pram __iomem *p_82xx_addr_filt;
467 u32 cecr_subblock; 471 u32 cecr_subblock;
468 472
469 p_82xx_addr_filt = 473 p_82xx_addr_filt =
470 (struct ucc_geth_82xx_address_filtering_pram *) ugeth->p_rx_glbl_pram-> 474 (struct ucc_geth_82xx_address_filtering_pram __iomem *) ugeth->p_rx_glbl_pram->
471 addressfiltering; 475 addressfiltering;
472 476
473 cecr_subblock = 477 cecr_subblock =
@@ -487,7 +491,7 @@ static void hw_add_addr_in_hash(struct ucc_geth_private *ugeth,
487static void magic_packet_detection_enable(struct ucc_geth_private *ugeth) 491static void magic_packet_detection_enable(struct ucc_geth_private *ugeth)
488{ 492{
489 struct ucc_fast_private *uccf; 493 struct ucc_fast_private *uccf;
490 struct ucc_geth *ug_regs; 494 struct ucc_geth __iomem *ug_regs;
491 u32 maccfg2, uccm; 495 u32 maccfg2, uccm;
492 496
493 uccf = ugeth->uccf; 497 uccf = ugeth->uccf;
@@ -507,7 +511,7 @@ static void magic_packet_detection_enable(struct ucc_geth_private *ugeth)
507static void magic_packet_detection_disable(struct ucc_geth_private *ugeth) 511static void magic_packet_detection_disable(struct ucc_geth_private *ugeth)
508{ 512{
509 struct ucc_fast_private *uccf; 513 struct ucc_fast_private *uccf;
510 struct ucc_geth *ug_regs; 514 struct ucc_geth __iomem *ug_regs;
511 u32 maccfg2, uccm; 515 u32 maccfg2, uccm;
512 516
513 uccf = ugeth->uccf; 517 uccf = ugeth->uccf;
@@ -538,13 +542,13 @@ static void get_statistics(struct ucc_geth_private *ugeth,
538 rx_firmware_statistics, 542 rx_firmware_statistics,
539 struct ucc_geth_hardware_statistics *hardware_statistics) 543 struct ucc_geth_hardware_statistics *hardware_statistics)
540{ 544{
541 struct ucc_fast *uf_regs; 545 struct ucc_fast __iomem *uf_regs;
542 struct ucc_geth *ug_regs; 546 struct ucc_geth __iomem *ug_regs;
543 struct ucc_geth_tx_firmware_statistics_pram *p_tx_fw_statistics_pram; 547 struct ucc_geth_tx_firmware_statistics_pram *p_tx_fw_statistics_pram;
544 struct ucc_geth_rx_firmware_statistics_pram *p_rx_fw_statistics_pram; 548 struct ucc_geth_rx_firmware_statistics_pram *p_rx_fw_statistics_pram;
545 549
546 ug_regs = ugeth->ug_regs; 550 ug_regs = ugeth->ug_regs;
547 uf_regs = (struct ucc_fast *) ug_regs; 551 uf_regs = (struct ucc_fast __iomem *) ug_regs;
548 p_tx_fw_statistics_pram = ugeth->p_tx_fw_statistics_pram; 552 p_tx_fw_statistics_pram = ugeth->p_tx_fw_statistics_pram;
549 p_rx_fw_statistics_pram = ugeth->p_rx_fw_statistics_pram; 553 p_rx_fw_statistics_pram = ugeth->p_rx_fw_statistics_pram;
550 554
@@ -1132,9 +1136,9 @@ static void dump_regs(struct ucc_geth_private *ugeth)
1132} 1136}
1133#endif /* DEBUG */ 1137#endif /* DEBUG */
1134 1138
1135static void init_default_reg_vals(volatile u32 *upsmr_register, 1139static void init_default_reg_vals(u32 __iomem *upsmr_register,
1136 volatile u32 *maccfg1_register, 1140 u32 __iomem *maccfg1_register,
1137 volatile u32 *maccfg2_register) 1141 u32 __iomem *maccfg2_register)
1138{ 1142{
1139 out_be32(upsmr_register, UCC_GETH_UPSMR_INIT); 1143 out_be32(upsmr_register, UCC_GETH_UPSMR_INIT);
1140 out_be32(maccfg1_register, UCC_GETH_MACCFG1_INIT); 1144 out_be32(maccfg1_register, UCC_GETH_MACCFG1_INIT);
@@ -1148,7 +1152,7 @@ static int init_half_duplex_params(int alt_beb,
1148 u8 alt_beb_truncation, 1152 u8 alt_beb_truncation,
1149 u8 max_retransmissions, 1153 u8 max_retransmissions,
1150 u8 collision_window, 1154 u8 collision_window,
1151 volatile u32 *hafdup_register) 1155 u32 __iomem *hafdup_register)
1152{ 1156{
1153 u32 value = 0; 1157 u32 value = 0;
1154 1158
@@ -1180,7 +1184,7 @@ static int init_inter_frame_gap_params(u8 non_btb_cs_ipg,
1180 u8 non_btb_ipg, 1184 u8 non_btb_ipg,
1181 u8 min_ifg, 1185 u8 min_ifg,
1182 u8 btb_ipg, 1186 u8 btb_ipg,
1183 volatile u32 *ipgifg_register) 1187 u32 __iomem *ipgifg_register)
1184{ 1188{
1185 u32 value = 0; 1189 u32 value = 0;
1186 1190
@@ -1215,9 +1219,9 @@ int init_flow_control_params(u32 automatic_flow_control_mode,
1215 int tx_flow_control_enable, 1219 int tx_flow_control_enable,
1216 u16 pause_period, 1220 u16 pause_period,
1217 u16 extension_field, 1221 u16 extension_field,
1218 volatile u32 *upsmr_register, 1222 u32 __iomem *upsmr_register,
1219 volatile u32 *uempr_register, 1223 u32 __iomem *uempr_register,
1220 volatile u32 *maccfg1_register) 1224 u32 __iomem *maccfg1_register)
1221{ 1225{
1222 u32 value = 0; 1226 u32 value = 0;
1223 1227
@@ -1243,8 +1247,8 @@ int init_flow_control_params(u32 automatic_flow_control_mode,
1243 1247
1244static int init_hw_statistics_gathering_mode(int enable_hardware_statistics, 1248static int init_hw_statistics_gathering_mode(int enable_hardware_statistics,
1245 int auto_zero_hardware_statistics, 1249 int auto_zero_hardware_statistics,
1246 volatile u32 *upsmr_register, 1250 u32 __iomem *upsmr_register,
1247 volatile u16 *uescr_register) 1251 u16 __iomem *uescr_register)
1248{ 1252{
1249 u32 upsmr_value = 0; 1253 u32 upsmr_value = 0;
1250 u16 uescr_value = 0; 1254 u16 uescr_value = 0;
@@ -1270,12 +1274,12 @@ static int init_hw_statistics_gathering_mode(int enable_hardware_statistics,
1270static int init_firmware_statistics_gathering_mode(int 1274static int init_firmware_statistics_gathering_mode(int
1271 enable_tx_firmware_statistics, 1275 enable_tx_firmware_statistics,
1272 int enable_rx_firmware_statistics, 1276 int enable_rx_firmware_statistics,
1273 volatile u32 *tx_rmon_base_ptr, 1277 u32 __iomem *tx_rmon_base_ptr,
1274 u32 tx_firmware_statistics_structure_address, 1278 u32 tx_firmware_statistics_structure_address,
1275 volatile u32 *rx_rmon_base_ptr, 1279 u32 __iomem *rx_rmon_base_ptr,
1276 u32 rx_firmware_statistics_structure_address, 1280 u32 rx_firmware_statistics_structure_address,
1277 volatile u16 *temoder_register, 1281 u16 __iomem *temoder_register,
1278 volatile u32 *remoder_register) 1282 u32 __iomem *remoder_register)
1279{ 1283{
1280 /* Note: this function does not check if */ 1284 /* Note: this function does not check if */
1281 /* the parameters it receives are NULL */ 1285 /* the parameters it receives are NULL */
@@ -1307,8 +1311,8 @@ static int init_mac_station_addr_regs(u8 address_byte_0,
1307 u8 address_byte_3, 1311 u8 address_byte_3,
1308 u8 address_byte_4, 1312 u8 address_byte_4,
1309 u8 address_byte_5, 1313 u8 address_byte_5,
1310 volatile u32 *macstnaddr1_register, 1314 u32 __iomem *macstnaddr1_register,
1311 volatile u32 *macstnaddr2_register) 1315 u32 __iomem *macstnaddr2_register)
1312{ 1316{
1313 u32 value = 0; 1317 u32 value = 0;
1314 1318
@@ -1344,7 +1348,7 @@ static int init_mac_station_addr_regs(u8 address_byte_0,
1344} 1348}
1345 1349
1346static int init_check_frame_length_mode(int length_check, 1350static int init_check_frame_length_mode(int length_check,
1347 volatile u32 *maccfg2_register) 1351 u32 __iomem *maccfg2_register)
1348{ 1352{
1349 u32 value = 0; 1353 u32 value = 0;
1350 1354
@@ -1360,7 +1364,7 @@ static int init_check_frame_length_mode(int length_check,
1360} 1364}
1361 1365
1362static int init_preamble_length(u8 preamble_length, 1366static int init_preamble_length(u8 preamble_length,
1363 volatile u32 *maccfg2_register) 1367 u32 __iomem *maccfg2_register)
1364{ 1368{
1365 u32 value = 0; 1369 u32 value = 0;
1366 1370
@@ -1376,7 +1380,7 @@ static int init_preamble_length(u8 preamble_length,
1376 1380
1377static int init_rx_parameters(int reject_broadcast, 1381static int init_rx_parameters(int reject_broadcast,
1378 int receive_short_frames, 1382 int receive_short_frames,
1379 int promiscuous, volatile u32 *upsmr_register) 1383 int promiscuous, u32 __iomem *upsmr_register)
1380{ 1384{
1381 u32 value = 0; 1385 u32 value = 0;
1382 1386
@@ -1403,7 +1407,7 @@ static int init_rx_parameters(int reject_broadcast,
1403} 1407}
1404 1408
1405static int init_max_rx_buff_len(u16 max_rx_buf_len, 1409static int init_max_rx_buff_len(u16 max_rx_buf_len,
1406 volatile u16 *mrblr_register) 1410 u16 __iomem *mrblr_register)
1407{ 1411{
1408 /* max_rx_buf_len value must be a multiple of 128 */ 1412 /* max_rx_buf_len value must be a multiple of 128 */
1409 if ((max_rx_buf_len == 0) 1413 if ((max_rx_buf_len == 0)
@@ -1415,8 +1419,8 @@ static int init_max_rx_buff_len(u16 max_rx_buf_len,
1415} 1419}
1416 1420
1417static int init_min_frame_len(u16 min_frame_length, 1421static int init_min_frame_len(u16 min_frame_length,
1418 volatile u16 *minflr_register, 1422 u16 __iomem *minflr_register,
1419 volatile u16 *mrblr_register) 1423 u16 __iomem *mrblr_register)
1420{ 1424{
1421 u16 mrblr_value = 0; 1425 u16 mrblr_value = 0;
1422 1426
@@ -1431,8 +1435,8 @@ static int init_min_frame_len(u16 min_frame_length,
1431static int adjust_enet_interface(struct ucc_geth_private *ugeth) 1435static int adjust_enet_interface(struct ucc_geth_private *ugeth)
1432{ 1436{
1433 struct ucc_geth_info *ug_info; 1437 struct ucc_geth_info *ug_info;
1434 struct ucc_geth *ug_regs; 1438 struct ucc_geth __iomem *ug_regs;
1435 struct ucc_fast *uf_regs; 1439 struct ucc_fast __iomem *uf_regs;
1436 int ret_val; 1440 int ret_val;
1437 u32 upsmr, maccfg2, tbiBaseAddress; 1441 u32 upsmr, maccfg2, tbiBaseAddress;
1438 u16 value; 1442 u16 value;
@@ -1517,8 +1521,8 @@ static int adjust_enet_interface(struct ucc_geth_private *ugeth)
1517static void adjust_link(struct net_device *dev) 1521static void adjust_link(struct net_device *dev)
1518{ 1522{
1519 struct ucc_geth_private *ugeth = netdev_priv(dev); 1523 struct ucc_geth_private *ugeth = netdev_priv(dev);
1520 struct ucc_geth *ug_regs; 1524 struct ucc_geth __iomem *ug_regs;
1521 struct ucc_fast *uf_regs; 1525 struct ucc_fast __iomem *uf_regs;
1522 struct phy_device *phydev = ugeth->phydev; 1526 struct phy_device *phydev = ugeth->phydev;
1523 unsigned long flags; 1527 unsigned long flags;
1524 int new_state = 0; 1528 int new_state = 0;
@@ -1678,9 +1682,9 @@ static int ugeth_graceful_stop_rx(struct ucc_geth_private * ugeth)
1678 uccf = ugeth->uccf; 1682 uccf = ugeth->uccf;
1679 1683
1680 /* Clear acknowledge bit */ 1684 /* Clear acknowledge bit */
1681 temp = ugeth->p_rx_glbl_pram->rxgstpack; 1685 temp = in_8(&ugeth->p_rx_glbl_pram->rxgstpack);
1682 temp &= ~GRACEFUL_STOP_ACKNOWLEDGE_RX; 1686 temp &= ~GRACEFUL_STOP_ACKNOWLEDGE_RX;
1683 ugeth->p_rx_glbl_pram->rxgstpack = temp; 1687 out_8(&ugeth->p_rx_glbl_pram->rxgstpack, temp);
1684 1688
1685 /* Keep issuing command and checking acknowledge bit until 1689 /* Keep issuing command and checking acknowledge bit until
1686 it is asserted, according to spec */ 1690 it is asserted, according to spec */
@@ -1692,7 +1696,7 @@ static int ugeth_graceful_stop_rx(struct ucc_geth_private * ugeth)
1692 qe_issue_cmd(QE_GRACEFUL_STOP_RX, cecr_subblock, 1696 qe_issue_cmd(QE_GRACEFUL_STOP_RX, cecr_subblock,
1693 QE_CR_PROTOCOL_ETHERNET, 0); 1697 QE_CR_PROTOCOL_ETHERNET, 0);
1694 1698
1695 temp = ugeth->p_rx_glbl_pram->rxgstpack; 1699 temp = in_8(&ugeth->p_rx_glbl_pram->rxgstpack);
1696 } while (!(temp & GRACEFUL_STOP_ACKNOWLEDGE_RX)); 1700 } while (!(temp & GRACEFUL_STOP_ACKNOWLEDGE_RX));
1697 1701
1698 uccf->stopped_rx = 1; 1702 uccf->stopped_rx = 1;
@@ -1991,19 +1995,20 @@ static int ugeth_82xx_filtering_clear_all_addr_in_hash(struct ucc_geth_private *
1991 enum enet_addr_type 1995 enum enet_addr_type
1992 enet_addr_type) 1996 enet_addr_type)
1993{ 1997{
1994 struct ucc_geth_82xx_address_filtering_pram *p_82xx_addr_filt; 1998 struct ucc_geth_82xx_address_filtering_pram __iomem *p_82xx_addr_filt;
1995 struct ucc_fast_private *uccf; 1999 struct ucc_fast_private *uccf;
1996 enum comm_dir comm_dir; 2000 enum comm_dir comm_dir;
1997 struct list_head *p_lh; 2001 struct list_head *p_lh;
1998 u16 i, num; 2002 u16 i, num;
1999 u32 *addr_h, *addr_l; 2003 u32 __iomem *addr_h;
2004 u32 __iomem *addr_l;
2000 u8 *p_counter; 2005 u8 *p_counter;
2001 2006
2002 uccf = ugeth->uccf; 2007 uccf = ugeth->uccf;
2003 2008
2004 p_82xx_addr_filt = 2009 p_82xx_addr_filt =
2005 (struct ucc_geth_82xx_address_filtering_pram *) ugeth->p_rx_glbl_pram-> 2010 (struct ucc_geth_82xx_address_filtering_pram __iomem *)
2006 addressfiltering; 2011 ugeth->p_rx_glbl_pram->addressfiltering;
2007 2012
2008 if (enet_addr_type == ENET_ADDR_TYPE_GROUP) { 2013 if (enet_addr_type == ENET_ADDR_TYPE_GROUP) {
2009 addr_h = &(p_82xx_addr_filt->gaddr_h); 2014 addr_h = &(p_82xx_addr_filt->gaddr_h);
@@ -2079,7 +2084,7 @@ static int ugeth_82xx_filtering_clear_addr_in_paddr(struct ucc_geth_private *uge
2079static void ucc_geth_memclean(struct ucc_geth_private *ugeth) 2084static void ucc_geth_memclean(struct ucc_geth_private *ugeth)
2080{ 2085{
2081 u16 i, j; 2086 u16 i, j;
2082 u8 *bd; 2087 u8 __iomem *bd;
2083 2088
2084 if (!ugeth) 2089 if (!ugeth)
2085 return; 2090 return;
@@ -2154,8 +2159,8 @@ static void ucc_geth_memclean(struct ucc_geth_private *ugeth)
2154 for (j = 0; j < ugeth->ug_info->bdRingLenTx[i]; j++) { 2159 for (j = 0; j < ugeth->ug_info->bdRingLenTx[i]; j++) {
2155 if (ugeth->tx_skbuff[i][j]) { 2160 if (ugeth->tx_skbuff[i][j]) {
2156 dma_unmap_single(NULL, 2161 dma_unmap_single(NULL,
2157 ((struct qe_bd *)bd)->buf, 2162 in_be32(&((struct qe_bd __iomem *)bd)->buf),
2158 (in_be32((u32 *)bd) & 2163 (in_be32((u32 __iomem *)bd) &
2159 BD_LENGTH_MASK), 2164 BD_LENGTH_MASK),
2160 DMA_TO_DEVICE); 2165 DMA_TO_DEVICE);
2161 dev_kfree_skb_any(ugeth->tx_skbuff[i][j]); 2166 dev_kfree_skb_any(ugeth->tx_skbuff[i][j]);
@@ -2182,7 +2187,7 @@ static void ucc_geth_memclean(struct ucc_geth_private *ugeth)
2182 for (j = 0; j < ugeth->ug_info->bdRingLenRx[i]; j++) { 2187 for (j = 0; j < ugeth->ug_info->bdRingLenRx[i]; j++) {
2183 if (ugeth->rx_skbuff[i][j]) { 2188 if (ugeth->rx_skbuff[i][j]) {
2184 dma_unmap_single(NULL, 2189 dma_unmap_single(NULL,
2185 ((struct qe_bd *)bd)->buf, 2190 in_be32(&((struct qe_bd __iomem *)bd)->buf),
2186 ugeth->ug_info-> 2191 ugeth->ug_info->
2187 uf_info.max_rx_buf_length + 2192 uf_info.max_rx_buf_length +
2188 UCC_GETH_RX_DATA_BUF_ALIGNMENT, 2193 UCC_GETH_RX_DATA_BUF_ALIGNMENT,
@@ -2218,8 +2223,8 @@ static void ucc_geth_set_multi(struct net_device *dev)
2218{ 2223{
2219 struct ucc_geth_private *ugeth; 2224 struct ucc_geth_private *ugeth;
2220 struct dev_mc_list *dmi; 2225 struct dev_mc_list *dmi;
2221 struct ucc_fast *uf_regs; 2226 struct ucc_fast __iomem *uf_regs;
2222 struct ucc_geth_82xx_address_filtering_pram *p_82xx_addr_filt; 2227 struct ucc_geth_82xx_address_filtering_pram __iomem *p_82xx_addr_filt;
2223 int i; 2228 int i;
2224 2229
2225 ugeth = netdev_priv(dev); 2230 ugeth = netdev_priv(dev);
@@ -2228,14 +2233,14 @@ static void ucc_geth_set_multi(struct net_device *dev)
2228 2233
2229 if (dev->flags & IFF_PROMISC) { 2234 if (dev->flags & IFF_PROMISC) {
2230 2235
2231 uf_regs->upsmr |= UPSMR_PRO; 2236 out_be32(&uf_regs->upsmr, in_be32(&uf_regs->upsmr) | UPSMR_PRO);
2232 2237
2233 } else { 2238 } else {
2234 2239
2235 uf_regs->upsmr &= ~UPSMR_PRO; 2240 out_be32(&uf_regs->upsmr, in_be32(&uf_regs->upsmr)&~UPSMR_PRO);
2236 2241
2237 p_82xx_addr_filt = 2242 p_82xx_addr_filt =
2238 (struct ucc_geth_82xx_address_filtering_pram *) ugeth-> 2243 (struct ucc_geth_82xx_address_filtering_pram __iomem *) ugeth->
2239 p_rx_glbl_pram->addressfiltering; 2244 p_rx_glbl_pram->addressfiltering;
2240 2245
2241 if (dev->flags & IFF_ALLMULTI) { 2246 if (dev->flags & IFF_ALLMULTI) {
@@ -2270,7 +2275,7 @@ static void ucc_geth_set_multi(struct net_device *dev)
2270 2275
2271static void ucc_geth_stop(struct ucc_geth_private *ugeth) 2276static void ucc_geth_stop(struct ucc_geth_private *ugeth)
2272{ 2277{
2273 struct ucc_geth *ug_regs = ugeth->ug_regs; 2278 struct ucc_geth __iomem *ug_regs = ugeth->ug_regs;
2274 struct phy_device *phydev = ugeth->phydev; 2279 struct phy_device *phydev = ugeth->phydev;
2275 u32 tempval; 2280 u32 tempval;
2276 2281
@@ -2419,20 +2424,20 @@ static int ucc_struct_init(struct ucc_geth_private *ugeth)
2419 return -ENOMEM; 2424 return -ENOMEM;
2420 } 2425 }
2421 2426
2422 ugeth->ug_regs = (struct ucc_geth *) ioremap(uf_info->regs, sizeof(struct ucc_geth)); 2427 ugeth->ug_regs = (struct ucc_geth __iomem *) ioremap(uf_info->regs, sizeof(struct ucc_geth));
2423 2428
2424 return 0; 2429 return 0;
2425} 2430}
2426 2431
2427static int ucc_geth_startup(struct ucc_geth_private *ugeth) 2432static int ucc_geth_startup(struct ucc_geth_private *ugeth)
2428{ 2433{
2429 struct ucc_geth_82xx_address_filtering_pram *p_82xx_addr_filt; 2434 struct ucc_geth_82xx_address_filtering_pram __iomem *p_82xx_addr_filt;
2430 struct ucc_geth_init_pram *p_init_enet_pram; 2435 struct ucc_geth_init_pram __iomem *p_init_enet_pram;
2431 struct ucc_fast_private *uccf; 2436 struct ucc_fast_private *uccf;
2432 struct ucc_geth_info *ug_info; 2437 struct ucc_geth_info *ug_info;
2433 struct ucc_fast_info *uf_info; 2438 struct ucc_fast_info *uf_info;
2434 struct ucc_fast *uf_regs; 2439 struct ucc_fast __iomem *uf_regs;
2435 struct ucc_geth *ug_regs; 2440 struct ucc_geth __iomem *ug_regs;
2436 int ret_val = -EINVAL; 2441 int ret_val = -EINVAL;
2437 u32 remoder = UCC_GETH_REMODER_INIT; 2442 u32 remoder = UCC_GETH_REMODER_INIT;
2438 u32 init_enet_pram_offset, cecr_subblock, command, maccfg1; 2443 u32 init_enet_pram_offset, cecr_subblock, command, maccfg1;
@@ -2440,7 +2445,8 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
2440 u16 temoder = UCC_GETH_TEMODER_INIT; 2445 u16 temoder = UCC_GETH_TEMODER_INIT;
2441 u16 test; 2446 u16 test;
2442 u8 function_code = 0; 2447 u8 function_code = 0;
2443 u8 *bd, *endOfRing; 2448 u8 __iomem *bd;
2449 u8 __iomem *endOfRing;
2444 u8 numThreadsRxNumerical, numThreadsTxNumerical; 2450 u8 numThreadsRxNumerical, numThreadsTxNumerical;
2445 2451
2446 ugeth_vdbg("%s: IN", __FUNCTION__); 2452 ugeth_vdbg("%s: IN", __FUNCTION__);
@@ -2602,11 +2608,11 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
2602 if (UCC_GETH_TX_BD_RING_ALIGNMENT > 4) 2608 if (UCC_GETH_TX_BD_RING_ALIGNMENT > 4)
2603 align = UCC_GETH_TX_BD_RING_ALIGNMENT; 2609 align = UCC_GETH_TX_BD_RING_ALIGNMENT;
2604 ugeth->tx_bd_ring_offset[j] = 2610 ugeth->tx_bd_ring_offset[j] =
2605 kmalloc((u32) (length + align), GFP_KERNEL); 2611 (u32) kmalloc((u32) (length + align), GFP_KERNEL);
2606 2612
2607 if (ugeth->tx_bd_ring_offset[j] != 0) 2613 if (ugeth->tx_bd_ring_offset[j] != 0)
2608 ugeth->p_tx_bd_ring[j] = 2614 ugeth->p_tx_bd_ring[j] =
2609 (void*)((ugeth->tx_bd_ring_offset[j] + 2615 (u8 __iomem *)((ugeth->tx_bd_ring_offset[j] +
2610 align) & ~(align - 1)); 2616 align) & ~(align - 1));
2611 } else if (uf_info->bd_mem_part == MEM_PART_MURAM) { 2617 } else if (uf_info->bd_mem_part == MEM_PART_MURAM) {
2612 ugeth->tx_bd_ring_offset[j] = 2618 ugeth->tx_bd_ring_offset[j] =
@@ -2614,7 +2620,7 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
2614 UCC_GETH_TX_BD_RING_ALIGNMENT); 2620 UCC_GETH_TX_BD_RING_ALIGNMENT);
2615 if (!IS_ERR_VALUE(ugeth->tx_bd_ring_offset[j])) 2621 if (!IS_ERR_VALUE(ugeth->tx_bd_ring_offset[j]))
2616 ugeth->p_tx_bd_ring[j] = 2622 ugeth->p_tx_bd_ring[j] =
2617 (u8 *) qe_muram_addr(ugeth-> 2623 (u8 __iomem *) qe_muram_addr(ugeth->
2618 tx_bd_ring_offset[j]); 2624 tx_bd_ring_offset[j]);
2619 } 2625 }
2620 if (!ugeth->p_tx_bd_ring[j]) { 2626 if (!ugeth->p_tx_bd_ring[j]) {
@@ -2626,8 +2632,8 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
2626 return -ENOMEM; 2632 return -ENOMEM;
2627 } 2633 }
2628 /* Zero unused end of bd ring, according to spec */ 2634 /* Zero unused end of bd ring, according to spec */
2629 memset(ugeth->p_tx_bd_ring[j] + 2635 memset_io((void __iomem *)(ugeth->p_tx_bd_ring[j] +
2630 ug_info->bdRingLenTx[j] * sizeof(struct qe_bd), 0, 2636 ug_info->bdRingLenTx[j] * sizeof(struct qe_bd)), 0,
2631 length - ug_info->bdRingLenTx[j] * sizeof(struct qe_bd)); 2637 length - ug_info->bdRingLenTx[j] * sizeof(struct qe_bd));
2632 } 2638 }
2633 2639
@@ -2639,10 +2645,10 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
2639 if (UCC_GETH_RX_BD_RING_ALIGNMENT > 4) 2645 if (UCC_GETH_RX_BD_RING_ALIGNMENT > 4)
2640 align = UCC_GETH_RX_BD_RING_ALIGNMENT; 2646 align = UCC_GETH_RX_BD_RING_ALIGNMENT;
2641 ugeth->rx_bd_ring_offset[j] = 2647 ugeth->rx_bd_ring_offset[j] =
2642 kmalloc((u32) (length + align), GFP_KERNEL); 2648 (u32) kmalloc((u32) (length + align), GFP_KERNEL);
2643 if (ugeth->rx_bd_ring_offset[j] != 0) 2649 if (ugeth->rx_bd_ring_offset[j] != 0)
2644 ugeth->p_rx_bd_ring[j] = 2650 ugeth->p_rx_bd_ring[j] =
2645 (void*)((ugeth->rx_bd_ring_offset[j] + 2651 (u8 __iomem *)((ugeth->rx_bd_ring_offset[j] +
2646 align) & ~(align - 1)); 2652 align) & ~(align - 1));
2647 } else if (uf_info->bd_mem_part == MEM_PART_MURAM) { 2653 } else if (uf_info->bd_mem_part == MEM_PART_MURAM) {
2648 ugeth->rx_bd_ring_offset[j] = 2654 ugeth->rx_bd_ring_offset[j] =
@@ -2650,7 +2656,7 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
2650 UCC_GETH_RX_BD_RING_ALIGNMENT); 2656 UCC_GETH_RX_BD_RING_ALIGNMENT);
2651 if (!IS_ERR_VALUE(ugeth->rx_bd_ring_offset[j])) 2657 if (!IS_ERR_VALUE(ugeth->rx_bd_ring_offset[j]))
2652 ugeth->p_rx_bd_ring[j] = 2658 ugeth->p_rx_bd_ring[j] =
2653 (u8 *) qe_muram_addr(ugeth-> 2659 (u8 __iomem *) qe_muram_addr(ugeth->
2654 rx_bd_ring_offset[j]); 2660 rx_bd_ring_offset[j]);
2655 } 2661 }
2656 if (!ugeth->p_rx_bd_ring[j]) { 2662 if (!ugeth->p_rx_bd_ring[j]) {
@@ -2685,14 +2691,14 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
2685 bd = ugeth->confBd[j] = ugeth->txBd[j] = ugeth->p_tx_bd_ring[j]; 2691 bd = ugeth->confBd[j] = ugeth->txBd[j] = ugeth->p_tx_bd_ring[j];
2686 for (i = 0; i < ug_info->bdRingLenTx[j]; i++) { 2692 for (i = 0; i < ug_info->bdRingLenTx[j]; i++) {
2687 /* clear bd buffer */ 2693 /* clear bd buffer */
2688 out_be32(&((struct qe_bd *)bd)->buf, 0); 2694 out_be32(&((struct qe_bd __iomem *)bd)->buf, 0);
2689 /* set bd status and length */ 2695 /* set bd status and length */
2690 out_be32((u32 *)bd, 0); 2696 out_be32((u32 __iomem *)bd, 0);
2691 bd += sizeof(struct qe_bd); 2697 bd += sizeof(struct qe_bd);
2692 } 2698 }
2693 bd -= sizeof(struct qe_bd); 2699 bd -= sizeof(struct qe_bd);
2694 /* set bd status and length */ 2700 /* set bd status and length */
2695 out_be32((u32 *)bd, T_W); /* for last BD set Wrap bit */ 2701 out_be32((u32 __iomem *)bd, T_W); /* for last BD set Wrap bit */
2696 } 2702 }
2697 2703
2698 /* Init Rx bds */ 2704 /* Init Rx bds */
@@ -2717,14 +2723,14 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
2717 bd = ugeth->rxBd[j] = ugeth->p_rx_bd_ring[j]; 2723 bd = ugeth->rxBd[j] = ugeth->p_rx_bd_ring[j];
2718 for (i = 0; i < ug_info->bdRingLenRx[j]; i++) { 2724 for (i = 0; i < ug_info->bdRingLenRx[j]; i++) {
2719 /* set bd status and length */ 2725 /* set bd status and length */
2720 out_be32((u32 *)bd, R_I); 2726 out_be32((u32 __iomem *)bd, R_I);
2721 /* clear bd buffer */ 2727 /* clear bd buffer */
2722 out_be32(&((struct qe_bd *)bd)->buf, 0); 2728 out_be32(&((struct qe_bd __iomem *)bd)->buf, 0);
2723 bd += sizeof(struct qe_bd); 2729 bd += sizeof(struct qe_bd);
2724 } 2730 }
2725 bd -= sizeof(struct qe_bd); 2731 bd -= sizeof(struct qe_bd);
2726 /* set bd status and length */ 2732 /* set bd status and length */
2727 out_be32((u32 *)bd, R_W); /* for last BD set Wrap bit */ 2733 out_be32((u32 __iomem *)bd, R_W); /* for last BD set Wrap bit */
2728 } 2734 }
2729 2735
2730 /* 2736 /*
@@ -2744,10 +2750,10 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
2744 return -ENOMEM; 2750 return -ENOMEM;
2745 } 2751 }
2746 ugeth->p_tx_glbl_pram = 2752 ugeth->p_tx_glbl_pram =
2747 (struct ucc_geth_tx_global_pram *) qe_muram_addr(ugeth-> 2753 (struct ucc_geth_tx_global_pram __iomem *) qe_muram_addr(ugeth->
2748 tx_glbl_pram_offset); 2754 tx_glbl_pram_offset);
2749 /* Zero out p_tx_glbl_pram */ 2755 /* Zero out p_tx_glbl_pram */
2750 memset(ugeth->p_tx_glbl_pram, 0, sizeof(struct ucc_geth_tx_global_pram)); 2756 memset_io((void __iomem *)ugeth->p_tx_glbl_pram, 0, sizeof(struct ucc_geth_tx_global_pram));
2751 2757
2752 /* Fill global PRAM */ 2758 /* Fill global PRAM */
2753 2759
@@ -2768,7 +2774,7 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
2768 } 2774 }
2769 2775
2770 ugeth->p_thread_data_tx = 2776 ugeth->p_thread_data_tx =
2771 (struct ucc_geth_thread_data_tx *) qe_muram_addr(ugeth-> 2777 (struct ucc_geth_thread_data_tx __iomem *) qe_muram_addr(ugeth->
2772 thread_dat_tx_offset); 2778 thread_dat_tx_offset);
2773 out_be32(&ugeth->p_tx_glbl_pram->tqptr, ugeth->thread_dat_tx_offset); 2779 out_be32(&ugeth->p_tx_glbl_pram->tqptr, ugeth->thread_dat_tx_offset);
2774 2780
@@ -2779,7 +2785,8 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
2779 2785
2780 /* iphoffset */ 2786 /* iphoffset */
2781 for (i = 0; i < TX_IP_OFFSET_ENTRY_MAX; i++) 2787 for (i = 0; i < TX_IP_OFFSET_ENTRY_MAX; i++)
2782 ugeth->p_tx_glbl_pram->iphoffset[i] = ug_info->iphoffset[i]; 2788 out_8(&ugeth->p_tx_glbl_pram->iphoffset[i],
2789 ug_info->iphoffset[i]);
2783 2790
2784 /* SQPTR */ 2791 /* SQPTR */
2785 /* Size varies with number of Tx queues */ 2792 /* Size varies with number of Tx queues */
@@ -2797,7 +2804,7 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
2797 } 2804 }
2798 2805
2799 ugeth->p_send_q_mem_reg = 2806 ugeth->p_send_q_mem_reg =
2800 (struct ucc_geth_send_queue_mem_region *) qe_muram_addr(ugeth-> 2807 (struct ucc_geth_send_queue_mem_region __iomem *) qe_muram_addr(ugeth->
2801 send_q_mem_reg_offset); 2808 send_q_mem_reg_offset);
2802 out_be32(&ugeth->p_tx_glbl_pram->sqptr, ugeth->send_q_mem_reg_offset); 2809 out_be32(&ugeth->p_tx_glbl_pram->sqptr, ugeth->send_q_mem_reg_offset);
2803 2810
@@ -2841,25 +2848,26 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
2841 } 2848 }
2842 2849
2843 ugeth->p_scheduler = 2850 ugeth->p_scheduler =
2844 (struct ucc_geth_scheduler *) qe_muram_addr(ugeth-> 2851 (struct ucc_geth_scheduler __iomem *) qe_muram_addr(ugeth->
2845 scheduler_offset); 2852 scheduler_offset);
2846 out_be32(&ugeth->p_tx_glbl_pram->schedulerbasepointer, 2853 out_be32(&ugeth->p_tx_glbl_pram->schedulerbasepointer,
2847 ugeth->scheduler_offset); 2854 ugeth->scheduler_offset);
2848 /* Zero out p_scheduler */ 2855 /* Zero out p_scheduler */
2849 memset(ugeth->p_scheduler, 0, sizeof(struct ucc_geth_scheduler)); 2856 memset_io((void __iomem *)ugeth->p_scheduler, 0, sizeof(struct ucc_geth_scheduler));
2850 2857
2851 /* Set values in scheduler */ 2858 /* Set values in scheduler */
2852 out_be32(&ugeth->p_scheduler->mblinterval, 2859 out_be32(&ugeth->p_scheduler->mblinterval,
2853 ug_info->mblinterval); 2860 ug_info->mblinterval);
2854 out_be16(&ugeth->p_scheduler->nortsrbytetime, 2861 out_be16(&ugeth->p_scheduler->nortsrbytetime,
2855 ug_info->nortsrbytetime); 2862 ug_info->nortsrbytetime);
2856 ugeth->p_scheduler->fracsiz = ug_info->fracsiz; 2863 out_8(&ugeth->p_scheduler->fracsiz, ug_info->fracsiz);
2857 ugeth->p_scheduler->strictpriorityq = ug_info->strictpriorityq; 2864 out_8(&ugeth->p_scheduler->strictpriorityq,
2858 ugeth->p_scheduler->txasap = ug_info->txasap; 2865 ug_info->strictpriorityq);
2859 ugeth->p_scheduler->extrabw = ug_info->extrabw; 2866 out_8(&ugeth->p_scheduler->txasap, ug_info->txasap);
2867 out_8(&ugeth->p_scheduler->extrabw, ug_info->extrabw);
2860 for (i = 0; i < NUM_TX_QUEUES; i++) 2868 for (i = 0; i < NUM_TX_QUEUES; i++)
2861 ugeth->p_scheduler->weightfactor[i] = 2869 out_8(&ugeth->p_scheduler->weightfactor[i],
2862 ug_info->weightfactor[i]; 2870 ug_info->weightfactor[i]);
2863 2871
2864 /* Set pointers to cpucount registers in scheduler */ 2872 /* Set pointers to cpucount registers in scheduler */
2865 ugeth->p_cpucount[0] = &(ugeth->p_scheduler->cpucount0); 2873 ugeth->p_cpucount[0] = &(ugeth->p_scheduler->cpucount0);
@@ -2890,10 +2898,10 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
2890 return -ENOMEM; 2898 return -ENOMEM;
2891 } 2899 }
2892 ugeth->p_tx_fw_statistics_pram = 2900 ugeth->p_tx_fw_statistics_pram =
2893 (struct ucc_geth_tx_firmware_statistics_pram *) 2901 (struct ucc_geth_tx_firmware_statistics_pram __iomem *)
2894 qe_muram_addr(ugeth->tx_fw_statistics_pram_offset); 2902 qe_muram_addr(ugeth->tx_fw_statistics_pram_offset);
2895 /* Zero out p_tx_fw_statistics_pram */ 2903 /* Zero out p_tx_fw_statistics_pram */
2896 memset(ugeth->p_tx_fw_statistics_pram, 2904 memset_io((void __iomem *)ugeth->p_tx_fw_statistics_pram,
2897 0, sizeof(struct ucc_geth_tx_firmware_statistics_pram)); 2905 0, sizeof(struct ucc_geth_tx_firmware_statistics_pram));
2898 } 2906 }
2899 2907
@@ -2930,10 +2938,10 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
2930 return -ENOMEM; 2938 return -ENOMEM;
2931 } 2939 }
2932 ugeth->p_rx_glbl_pram = 2940 ugeth->p_rx_glbl_pram =
2933 (struct ucc_geth_rx_global_pram *) qe_muram_addr(ugeth-> 2941 (struct ucc_geth_rx_global_pram __iomem *) qe_muram_addr(ugeth->
2934 rx_glbl_pram_offset); 2942 rx_glbl_pram_offset);
2935 /* Zero out p_rx_glbl_pram */ 2943 /* Zero out p_rx_glbl_pram */
2936 memset(ugeth->p_rx_glbl_pram, 0, sizeof(struct ucc_geth_rx_global_pram)); 2944 memset_io((void __iomem *)ugeth->p_rx_glbl_pram, 0, sizeof(struct ucc_geth_rx_global_pram));
2937 2945
2938 /* Fill global PRAM */ 2946 /* Fill global PRAM */
2939 2947
@@ -2953,7 +2961,7 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
2953 } 2961 }
2954 2962
2955 ugeth->p_thread_data_rx = 2963 ugeth->p_thread_data_rx =
2956 (struct ucc_geth_thread_data_rx *) qe_muram_addr(ugeth-> 2964 (struct ucc_geth_thread_data_rx __iomem *) qe_muram_addr(ugeth->
2957 thread_dat_rx_offset); 2965 thread_dat_rx_offset);
2958 out_be32(&ugeth->p_rx_glbl_pram->rqptr, ugeth->thread_dat_rx_offset); 2966 out_be32(&ugeth->p_rx_glbl_pram->rqptr, ugeth->thread_dat_rx_offset);
2959 2967
@@ -2976,10 +2984,10 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
2976 return -ENOMEM; 2984 return -ENOMEM;
2977 } 2985 }
2978 ugeth->p_rx_fw_statistics_pram = 2986 ugeth->p_rx_fw_statistics_pram =
2979 (struct ucc_geth_rx_firmware_statistics_pram *) 2987 (struct ucc_geth_rx_firmware_statistics_pram __iomem *)
2980 qe_muram_addr(ugeth->rx_fw_statistics_pram_offset); 2988 qe_muram_addr(ugeth->rx_fw_statistics_pram_offset);
2981 /* Zero out p_rx_fw_statistics_pram */ 2989 /* Zero out p_rx_fw_statistics_pram */
2982 memset(ugeth->p_rx_fw_statistics_pram, 0, 2990 memset_io((void __iomem *)ugeth->p_rx_fw_statistics_pram, 0,
2983 sizeof(struct ucc_geth_rx_firmware_statistics_pram)); 2991 sizeof(struct ucc_geth_rx_firmware_statistics_pram));
2984 } 2992 }
2985 2993
@@ -3000,7 +3008,7 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
3000 } 3008 }
3001 3009
3002 ugeth->p_rx_irq_coalescing_tbl = 3010 ugeth->p_rx_irq_coalescing_tbl =
3003 (struct ucc_geth_rx_interrupt_coalescing_table *) 3011 (struct ucc_geth_rx_interrupt_coalescing_table __iomem *)
3004 qe_muram_addr(ugeth->rx_irq_coalescing_tbl_offset); 3012 qe_muram_addr(ugeth->rx_irq_coalescing_tbl_offset);
3005 out_be32(&ugeth->p_rx_glbl_pram->intcoalescingptr, 3013 out_be32(&ugeth->p_rx_glbl_pram->intcoalescingptr,
3006 ugeth->rx_irq_coalescing_tbl_offset); 3014 ugeth->rx_irq_coalescing_tbl_offset);
@@ -3069,11 +3077,11 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
3069 } 3077 }
3070 3078
3071 ugeth->p_rx_bd_qs_tbl = 3079 ugeth->p_rx_bd_qs_tbl =
3072 (struct ucc_geth_rx_bd_queues_entry *) qe_muram_addr(ugeth-> 3080 (struct ucc_geth_rx_bd_queues_entry __iomem *) qe_muram_addr(ugeth->
3073 rx_bd_qs_tbl_offset); 3081 rx_bd_qs_tbl_offset);
3074 out_be32(&ugeth->p_rx_glbl_pram->rbdqptr, ugeth->rx_bd_qs_tbl_offset); 3082 out_be32(&ugeth->p_rx_glbl_pram->rbdqptr, ugeth->rx_bd_qs_tbl_offset);
3075 /* Zero out p_rx_bd_qs_tbl */ 3083 /* Zero out p_rx_bd_qs_tbl */
3076 memset(ugeth->p_rx_bd_qs_tbl, 3084 memset_io((void __iomem *)ugeth->p_rx_bd_qs_tbl,
3077 0, 3085 0,
3078 ug_info->numQueuesRx * (sizeof(struct ucc_geth_rx_bd_queues_entry) + 3086 ug_info->numQueuesRx * (sizeof(struct ucc_geth_rx_bd_queues_entry) +
3079 sizeof(struct ucc_geth_rx_prefetched_bds))); 3087 sizeof(struct ucc_geth_rx_prefetched_bds)));
@@ -3133,7 +3141,7 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
3133 &ugeth->p_rx_glbl_pram->remoder); 3141 &ugeth->p_rx_glbl_pram->remoder);
3134 3142
3135 /* function code register */ 3143 /* function code register */
3136 ugeth->p_rx_glbl_pram->rstate = function_code; 3144 out_8(&ugeth->p_rx_glbl_pram->rstate, function_code);
3137 3145
3138 /* initialize extended filtering */ 3146 /* initialize extended filtering */
3139 if (ug_info->rxExtendedFiltering) { 3147 if (ug_info->rxExtendedFiltering) {
@@ -3160,7 +3168,7 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
3160 } 3168 }
3161 3169
3162 ugeth->p_exf_glbl_param = 3170 ugeth->p_exf_glbl_param =
3163 (struct ucc_geth_exf_global_pram *) qe_muram_addr(ugeth-> 3171 (struct ucc_geth_exf_global_pram __iomem *) qe_muram_addr(ugeth->
3164 exf_glbl_param_offset); 3172 exf_glbl_param_offset);
3165 out_be32(&ugeth->p_rx_glbl_pram->exfGlobalParam, 3173 out_be32(&ugeth->p_rx_glbl_pram->exfGlobalParam,
3166 ugeth->exf_glbl_param_offset); 3174 ugeth->exf_glbl_param_offset);
@@ -3175,7 +3183,7 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
3175 ugeth_82xx_filtering_clear_addr_in_paddr(ugeth, (u8) j); 3183 ugeth_82xx_filtering_clear_addr_in_paddr(ugeth, (u8) j);
3176 3184
3177 p_82xx_addr_filt = 3185 p_82xx_addr_filt =
3178 (struct ucc_geth_82xx_address_filtering_pram *) ugeth-> 3186 (struct ucc_geth_82xx_address_filtering_pram __iomem *) ugeth->
3179 p_rx_glbl_pram->addressfiltering; 3187 p_rx_glbl_pram->addressfiltering;
3180 3188
3181 ugeth_82xx_filtering_clear_all_addr_in_hash(ugeth, 3189 ugeth_82xx_filtering_clear_all_addr_in_hash(ugeth,
@@ -3307,17 +3315,21 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
3307 return -ENOMEM; 3315 return -ENOMEM;
3308 } 3316 }
3309 p_init_enet_pram = 3317 p_init_enet_pram =
3310 (struct ucc_geth_init_pram *) qe_muram_addr(init_enet_pram_offset); 3318 (struct ucc_geth_init_pram __iomem *) qe_muram_addr(init_enet_pram_offset);
3311 3319
3312 /* Copy shadow InitEnet command parameter structure into PRAM */ 3320 /* Copy shadow InitEnet command parameter structure into PRAM */
3313 p_init_enet_pram->resinit1 = ugeth->p_init_enet_param_shadow->resinit1; 3321 out_8(&p_init_enet_pram->resinit1,
3314 p_init_enet_pram->resinit2 = ugeth->p_init_enet_param_shadow->resinit2; 3322 ugeth->p_init_enet_param_shadow->resinit1);
3315 p_init_enet_pram->resinit3 = ugeth->p_init_enet_param_shadow->resinit3; 3323 out_8(&p_init_enet_pram->resinit2,
3316 p_init_enet_pram->resinit4 = ugeth->p_init_enet_param_shadow->resinit4; 3324 ugeth->p_init_enet_param_shadow->resinit2);
3325 out_8(&p_init_enet_pram->resinit3,
3326 ugeth->p_init_enet_param_shadow->resinit3);
3327 out_8(&p_init_enet_pram->resinit4,
3328 ugeth->p_init_enet_param_shadow->resinit4);
3317 out_be16(&p_init_enet_pram->resinit5, 3329 out_be16(&p_init_enet_pram->resinit5,
3318 ugeth->p_init_enet_param_shadow->resinit5); 3330 ugeth->p_init_enet_param_shadow->resinit5);
3319 p_init_enet_pram->largestexternallookupkeysize = 3331 out_8(&p_init_enet_pram->largestexternallookupkeysize,
3320 ugeth->p_init_enet_param_shadow->largestexternallookupkeysize; 3332 ugeth->p_init_enet_param_shadow->largestexternallookupkeysize);
3321 out_be32(&p_init_enet_pram->rgftgfrxglobal, 3333 out_be32(&p_init_enet_pram->rgftgfrxglobal,
3322 ugeth->p_init_enet_param_shadow->rgftgfrxglobal); 3334 ugeth->p_init_enet_param_shadow->rgftgfrxglobal);
3323 for (i = 0; i < ENET_INIT_PARAM_MAX_ENTRIES_RX; i++) 3335 for (i = 0; i < ENET_INIT_PARAM_MAX_ENTRIES_RX; i++)
@@ -3371,7 +3383,7 @@ static int ucc_geth_start_xmit(struct sk_buff *skb, struct net_device *dev)
3371#ifdef CONFIG_UGETH_TX_ON_DEMAND 3383#ifdef CONFIG_UGETH_TX_ON_DEMAND
3372 struct ucc_fast_private *uccf; 3384 struct ucc_fast_private *uccf;
3373#endif 3385#endif
3374 u8 *bd; /* BD pointer */ 3386 u8 __iomem *bd; /* BD pointer */
3375 u32 bd_status; 3387 u32 bd_status;
3376 u8 txQ = 0; 3388 u8 txQ = 0;
3377 3389
@@ -3383,7 +3395,7 @@ static int ucc_geth_start_xmit(struct sk_buff *skb, struct net_device *dev)
 
 	/* Start from the next BD that should be filled */
 	bd = ugeth->txBd[txQ];
-	bd_status = in_be32((u32 *)bd);
+	bd_status = in_be32((u32 __iomem *)bd);
 	/* Save the skb pointer so we can free it later */
 	ugeth->tx_skbuff[txQ][ugeth->skb_curtx[txQ]] = skb;
 
@@ -3393,7 +3405,7 @@ static int ucc_geth_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		1) & TX_RING_MOD_MASK(ugeth->ug_info->bdRingLenTx[txQ]);
 
 	/* set up the buffer descriptor */
-	out_be32(&((struct qe_bd *)bd)->buf,
+	out_be32(&((struct qe_bd __iomem *)bd)->buf,
 		 dma_map_single(NULL, skb->data, skb->len, DMA_TO_DEVICE));
 
 	/* printk(KERN_DEBUG"skb->data is 0x%x\n",skb->data); */
@@ -3401,7 +3413,7 @@ static int ucc_geth_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	bd_status = (bd_status & T_W) | T_R | T_I | T_L | skb->len;
 
 	/* set bd status and length */
-	out_be32((u32 *)bd, bd_status);
+	out_be32((u32 __iomem *)bd, bd_status);
 
 	dev->trans_start = jiffies;
 
@@ -3441,7 +3453,7 @@ static int ucc_geth_start_xmit(struct sk_buff *skb, struct net_device *dev)
 static int ucc_geth_rx(struct ucc_geth_private *ugeth, u8 rxQ, int rx_work_limit)
 {
 	struct sk_buff *skb;
-	u8 *bd;
+	u8 __iomem *bd;
 	u16 length, howmany = 0;
 	u32 bd_status;
 	u8 *bdBuffer;
@@ -3454,11 +3466,11 @@ static int ucc_geth_rx(struct ucc_geth_private *ugeth, u8 rxQ, int rx_work_limit
 	/* collect received buffers */
 	bd = ugeth->rxBd[rxQ];
 
-	bd_status = in_be32((u32 *)bd);
+	bd_status = in_be32((u32 __iomem *)bd);
 
 	/* while there are received buffers and BD is full (~R_E) */
 	while (!((bd_status & (R_E)) || (--rx_work_limit < 0))) {
-		bdBuffer = (u8 *) in_be32(&((struct qe_bd *)bd)->buf);
+		bdBuffer = (u8 *) in_be32(&((struct qe_bd __iomem *)bd)->buf);
 		length = (u16) ((bd_status & BD_LENGTH_MASK) - 4);
 		skb = ugeth->rx_skbuff[rxQ][ugeth->skb_currx[rxQ]];
 
@@ -3516,7 +3528,7 @@ static int ucc_geth_rx(struct ucc_geth_private *ugeth, u8 rxQ, int rx_work_limit
 		else
 			bd += sizeof(struct qe_bd);
 
-		bd_status = in_be32((u32 *)bd);
+		bd_status = in_be32((u32 __iomem *)bd);
 	}
 
 	ugeth->rxBd[rxQ] = bd;
@@ -3527,11 +3539,11 @@ static int ucc_geth_tx(struct net_device *dev, u8 txQ)
 {
 	/* Start from the next BD that should be filled */
 	struct ucc_geth_private *ugeth = netdev_priv(dev);
-	u8 *bd;			/* BD pointer */
+	u8 __iomem *bd;		/* BD pointer */
 	u32 bd_status;
 
 	bd = ugeth->confBd[txQ];
-	bd_status = in_be32((u32 *)bd);
+	bd_status = in_be32((u32 __iomem *)bd);
 
 	/* Normal processing. */
 	while ((bd_status & T_R) == 0) {
@@ -3561,7 +3573,7 @@ static int ucc_geth_tx(struct net_device *dev, u8 txQ)
 			bd += sizeof(struct qe_bd);
 		else
 			bd = ugeth->p_tx_bd_ring[txQ];
-		bd_status = in_be32((u32 *)bd);
+		bd_status = in_be32((u32 __iomem *)bd);
 	}
 	ugeth->confBd[txQ] = bd;
 	return 0;
@@ -3910,7 +3922,7 @@ static int ucc_geth_probe(struct of_device* ofdev, const struct of_device_id *ma
 			return -EINVAL;
 		}
 	} else {
-		prop = of_get_property(np, "rx-clock", NULL);
+		prop = of_get_property(np, "tx-clock", NULL);
 		if (!prop) {
 			printk(KERN_ERR
 				"ucc_geth: mising tx-clock-name property\n");
diff --git a/drivers/net/ucc_geth.h b/drivers/net/ucc_geth.h
index 9f8b7580a3a4..abc0e2242634 100644
--- a/drivers/net/ucc_geth.h
+++ b/drivers/net/ucc_geth.h
@@ -700,8 +700,8 @@ struct ucc_geth_82xx_address_filtering_pram {
 	u32 iaddr_l;		/* individual address filter, low */
 	u32 gaddr_h;		/* group address filter, high */
 	u32 gaddr_l;		/* group address filter, low */
-	struct ucc_geth_82xx_enet_address taddr;
-	struct ucc_geth_82xx_enet_address paddr[NUM_OF_PADDRS];
+	struct ucc_geth_82xx_enet_address __iomem taddr;
+	struct ucc_geth_82xx_enet_address __iomem paddr[NUM_OF_PADDRS];
 	u8 res0[0x40 - 0x38];
 } __attribute__ ((packed));
 
@@ -1186,40 +1186,40 @@ struct ucc_geth_private {
 	struct ucc_fast_private *uccf;
 	struct net_device *dev;
 	struct napi_struct napi;
-	struct ucc_geth *ug_regs;
+	struct ucc_geth __iomem *ug_regs;
 	struct ucc_geth_init_pram *p_init_enet_param_shadow;
-	struct ucc_geth_exf_global_pram *p_exf_glbl_param;
+	struct ucc_geth_exf_global_pram __iomem *p_exf_glbl_param;
 	u32 exf_glbl_param_offset;
-	struct ucc_geth_rx_global_pram *p_rx_glbl_pram;
+	struct ucc_geth_rx_global_pram __iomem *p_rx_glbl_pram;
 	u32 rx_glbl_pram_offset;
-	struct ucc_geth_tx_global_pram *p_tx_glbl_pram;
+	struct ucc_geth_tx_global_pram __iomem *p_tx_glbl_pram;
 	u32 tx_glbl_pram_offset;
-	struct ucc_geth_send_queue_mem_region *p_send_q_mem_reg;
+	struct ucc_geth_send_queue_mem_region __iomem *p_send_q_mem_reg;
 	u32 send_q_mem_reg_offset;
-	struct ucc_geth_thread_data_tx *p_thread_data_tx;
+	struct ucc_geth_thread_data_tx __iomem *p_thread_data_tx;
 	u32 thread_dat_tx_offset;
-	struct ucc_geth_thread_data_rx *p_thread_data_rx;
+	struct ucc_geth_thread_data_rx __iomem *p_thread_data_rx;
 	u32 thread_dat_rx_offset;
-	struct ucc_geth_scheduler *p_scheduler;
+	struct ucc_geth_scheduler __iomem *p_scheduler;
 	u32 scheduler_offset;
-	struct ucc_geth_tx_firmware_statistics_pram *p_tx_fw_statistics_pram;
+	struct ucc_geth_tx_firmware_statistics_pram __iomem *p_tx_fw_statistics_pram;
 	u32 tx_fw_statistics_pram_offset;
-	struct ucc_geth_rx_firmware_statistics_pram *p_rx_fw_statistics_pram;
+	struct ucc_geth_rx_firmware_statistics_pram __iomem *p_rx_fw_statistics_pram;
 	u32 rx_fw_statistics_pram_offset;
-	struct ucc_geth_rx_interrupt_coalescing_table *p_rx_irq_coalescing_tbl;
+	struct ucc_geth_rx_interrupt_coalescing_table __iomem *p_rx_irq_coalescing_tbl;
 	u32 rx_irq_coalescing_tbl_offset;
-	struct ucc_geth_rx_bd_queues_entry *p_rx_bd_qs_tbl;
+	struct ucc_geth_rx_bd_queues_entry __iomem *p_rx_bd_qs_tbl;
 	u32 rx_bd_qs_tbl_offset;
-	u8 *p_tx_bd_ring[NUM_TX_QUEUES];
+	u8 __iomem *p_tx_bd_ring[NUM_TX_QUEUES];
 	u32 tx_bd_ring_offset[NUM_TX_QUEUES];
-	u8 *p_rx_bd_ring[NUM_RX_QUEUES];
+	u8 __iomem *p_rx_bd_ring[NUM_RX_QUEUES];
 	u32 rx_bd_ring_offset[NUM_RX_QUEUES];
-	u8 *confBd[NUM_TX_QUEUES];
-	u8 *txBd[NUM_TX_QUEUES];
-	u8 *rxBd[NUM_RX_QUEUES];
+	u8 __iomem *confBd[NUM_TX_QUEUES];
+	u8 __iomem *txBd[NUM_TX_QUEUES];
+	u8 __iomem *rxBd[NUM_RX_QUEUES];
 	int badFrame[NUM_RX_QUEUES];
 	u16 cpucount[NUM_TX_QUEUES];
-	volatile u16 *p_cpucount[NUM_TX_QUEUES];
+	u16 __iomem *p_cpucount[NUM_TX_QUEUES];
 	int indAddrRegUsed[NUM_OF_PADDRS];
 	u8 paddr[NUM_OF_PADDRS][ENET_NUM_OCTETS_PER_ADDRESS];	/* ethernet address */
 	u8 numGroupAddrInHash;
@@ -1251,4 +1251,12 @@ struct ucc_geth_private {
 	int oldlink;
 };
 
+void uec_set_ethtool_ops(struct net_device *netdev);
+int init_flow_control_params(u32 automatic_flow_control_mode,
+		int rx_flow_control_enable, int tx_flow_control_enable,
+		u16 pause_period, u16 extension_field,
+		u32 __iomem *upsmr_register, u32 __iomem *uempr_register,
+		u32 __iomem *maccfg1_register);
+
+
 #endif				/* __UCC_GETH_H__ */
diff --git a/drivers/net/ucc_geth_ethtool.c b/drivers/net/ucc_geth_ethtool.c
index 33200038a14d..5f176f2b1c17 100644
--- a/drivers/net/ucc_geth_ethtool.c
+++ b/drivers/net/ucc_geth_ethtool.c
@@ -108,12 +108,6 @@ static char rx_fw_stat_gstrings[][ETH_GSTRING_LEN] = {
 #define UEC_TX_FW_STATS_LEN ARRAY_SIZE(tx_fw_stat_gstrings)
 #define UEC_RX_FW_STATS_LEN ARRAY_SIZE(rx_fw_stat_gstrings)
 
-extern int init_flow_control_params(u32 automatic_flow_control_mode,
-		int rx_flow_control_enable,
-		int tx_flow_control_enable, u16 pause_period,
-		u16 extension_field, volatile u32 *upsmr_register,
-		volatile u32 *uempr_register, volatile u32 *maccfg1_register);
-
 static int
 uec_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
 {
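
Together, the ucc_geth.h and ucc_geth_ethtool.c hunks replace a per-file extern declaration with a single prototype in the shared header, so the definition and every caller are checked against one signature (now taking __iomem pointers instead of volatile ones). A sketch of the pattern, using a hypothetical foo_configure():

	/* foo.h -- the one shared prototype */
	int foo_configure(u32 mode, u32 __iomem *reg);

	/* foo.c -- definition is checked against foo.h */
	#include "foo.h"
	int foo_configure(u32 mode, u32 __iomem *reg)
	{
		out_be32(reg, mode);
		return 0;
	}

	/* foo_ethtool.c -- includes foo.h; no local extern copy
	 * that could silently drift out of sync with the definition */
	#include "foo.h"
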
diff --git a/drivers/net/ucc_geth_mii.c b/drivers/net/ucc_geth_mii.c
index 2af490781005..940474736922 100644
--- a/drivers/net/ucc_geth_mii.c
+++ b/drivers/net/ucc_geth_mii.c
@@ -104,7 +104,7 @@ int uec_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
 }
 
 /* Reset the MIIM registers, and wait for the bus to free */
-int uec_mdio_reset(struct mii_bus *bus)
+static int uec_mdio_reset(struct mii_bus *bus)
 {
 	struct ucc_mii_mng __iomem *regs = (void __iomem *)bus->priv;
 	unsigned int timeout = PHY_INIT_TIMEOUT;
@@ -240,7 +240,7 @@ reg_map_fail:
 	return err;
 }
 
-int uec_mdio_remove(struct of_device *ofdev)
+static int uec_mdio_remove(struct of_device *ofdev)
 {
 	struct device *device = &ofdev->dev;
 	struct mii_bus *bus = dev_get_drvdata(device);
diff --git a/drivers/net/usb/asix.c b/drivers/net/usb/asix.c
index 6f245cfb6624..dc6f097062df 100644
--- a/drivers/net/usb/asix.c
+++ b/drivers/net/usb/asix.c
@@ -1381,6 +1381,10 @@ static const struct usb_device_id products [] = {
 	USB_DEVICE (0x0411, 0x003d),
 	.driver_info = (unsigned long) &ax8817x_info,
 }, {
+	// Buffalo LUA-U2-GT 10/100/1000
+	USB_DEVICE (0x0411, 0x006e),
+	.driver_info = (unsigned long) &ax88178_info,
+}, {
 	// Sitecom LN-029 "USB 2.0 10/100 Ethernet adapter"
 	USB_DEVICE (0x6189, 0x182d),
 	.driver_info = (unsigned long) &ax8817x_info,
diff --git a/drivers/net/wan/Kconfig b/drivers/net/wan/Kconfig
index 8005dd16fb4e..d5140aed7b79 100644
--- a/drivers/net/wan/Kconfig
+++ b/drivers/net/wan/Kconfig
@@ -150,11 +150,9 @@ config HDLC_FR
 
 config HDLC_PPP
 	tristate "Synchronous Point-to-Point Protocol (PPP) support"
-	depends on HDLC && BROKEN
+	depends on HDLC
 	help
 	  Generic HDLC driver supporting PPP over WAN connections.
-	  This module is currently broken and will cause a kernel panic
-	  when a device configured in PPP mode is activated.
 
 	  It will be replaced by new PPP implementation in Linux 2.6.26.
 
diff --git a/drivers/net/wan/cosa.c b/drivers/net/wan/cosa.c
index 45ddfc9763cc..b0fce1387eaf 100644
--- a/drivers/net/wan/cosa.c
+++ b/drivers/net/wan/cosa.c
@@ -629,7 +629,7 @@ static void sppp_channel_init(struct channel_data *chan)
 	d->base_addr = chan->cosa->datareg;
 	d->irq = chan->cosa->irq;
 	d->dma = chan->cosa->dma;
-	d->priv = chan;
+	d->ml_priv = chan;
 	sppp_attach(&chan->pppdev);
 	if (register_netdev(d)) {
 		printk(KERN_WARNING "%s: register_netdev failed.\n", d->name);
@@ -650,7 +650,7 @@ static void sppp_channel_delete(struct channel_data *chan)
 
 static int cosa_sppp_open(struct net_device *d)
 {
-	struct channel_data *chan = d->priv;
+	struct channel_data *chan = d->ml_priv;
 	int err;
 	unsigned long flags;
 
@@ -690,7 +690,7 @@ static int cosa_sppp_open(struct net_device *d)
 
 static int cosa_sppp_tx(struct sk_buff *skb, struct net_device *dev)
 {
-	struct channel_data *chan = dev->priv;
+	struct channel_data *chan = dev->ml_priv;
 
 	netif_stop_queue(dev);
 
@@ -701,7 +701,7 @@ static int cosa_sppp_tx(struct sk_buff *skb, struct net_device *dev)
 
 static void cosa_sppp_timeout(struct net_device *dev)
 {
-	struct channel_data *chan = dev->priv;
+	struct channel_data *chan = dev->ml_priv;
 
 	if (test_bit(RXBIT, &chan->cosa->rxtx)) {
 		chan->stats.rx_errors++;
@@ -720,7 +720,7 @@ static void cosa_sppp_timeout(struct net_device *dev)
 
 static int cosa_sppp_close(struct net_device *d)
 {
-	struct channel_data *chan = d->priv;
+	struct channel_data *chan = d->ml_priv;
 	unsigned long flags;
 
 	netif_stop_queue(d);
@@ -800,7 +800,7 @@ static int sppp_tx_done(struct channel_data *chan, int size)
 
 static struct net_device_stats *cosa_net_stats(struct net_device *dev)
 {
-	struct channel_data *chan = dev->priv;
+	struct channel_data *chan = dev->ml_priv;
 	return &chan->stats;
 }
 
@@ -1217,7 +1217,7 @@ static int cosa_sppp_ioctl(struct net_device *dev, struct ifreq *ifr,
 			   int cmd)
 {
 	int rv;
-	struct channel_data *chan = dev->priv;
+	struct channel_data *chan = dev->ml_priv;
 	rv = cosa_ioctl_common(chan->cosa, chan, cmd, (unsigned long)ifr->ifr_data);
 	if (rv == -ENOIOCTLCMD) {
 		return sppp_do_ioctl(dev, ifr, cmd);
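
The cosa.c hunks above, like the hdlc_ppp, hostess_sv11, lmc and sealevel hunks that follow, are one mechanical conversion: sync-PPP drivers stop parking their channel pointer in the generic dev->priv and use the dedicated dev->ml_priv ("middle layer private") field, leaving dev->priv to the netdev core. A sketch of the pattern, with a hypothetical driver foo:

	#include <linux/netdevice.h>

	struct foo_channel {
		struct net_device *dev;
		/* ... driver state ... */
	};

	static void foo_channel_init(struct net_device *d,
				     struct foo_channel *chan)
	{
		d->ml_priv = chan;		/* was: d->priv = chan; */
	}

	static int foo_open(struct net_device *d)
	{
		/* every callback reads it back the same way */
		struct foo_channel *chan = d->ml_priv;

		return chan ? 0 : -ENODEV;
	}
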
diff --git a/drivers/net/wan/hdlc_ppp.c b/drivers/net/wan/hdlc_ppp.c
index 10396d9686f4..00308337928e 100644
--- a/drivers/net/wan/hdlc_ppp.c
+++ b/drivers/net/wan/hdlc_ppp.c
@@ -45,7 +45,7 @@ static int ppp_open(struct net_device *dev)
 	int (*old_ioctl)(struct net_device *, struct ifreq *, int);
 	int result;
 
-	dev->priv = &state(hdlc)->syncppp_ptr;
+	dev->ml_priv = &state(hdlc)->syncppp_ptr;
 	state(hdlc)->syncppp_ptr = &state(hdlc)->pppdev;
 	state(hdlc)->pppdev.dev = dev;
 
diff --git a/drivers/net/wan/hostess_sv11.c b/drivers/net/wan/hostess_sv11.c
index 83dbc924fcb5..f3065d3473fd 100644
--- a/drivers/net/wan/hostess_sv11.c
+++ b/drivers/net/wan/hostess_sv11.c
@@ -75,7 +75,7 @@ static void hostess_input(struct z8530_channel *c, struct sk_buff *skb)
 
 static int hostess_open(struct net_device *d)
 {
-	struct sv11_device *sv11=d->priv;
+	struct sv11_device *sv11=d->ml_priv;
 	int err = -1;
 
 	/*
@@ -128,7 +128,7 @@ static int hostess_open(struct net_device *d)
 
 static int hostess_close(struct net_device *d)
 {
-	struct sv11_device *sv11=d->priv;
+	struct sv11_device *sv11=d->ml_priv;
 	/*
 	 *	Discard new frames
 	 */
@@ -159,14 +159,14 @@ static int hostess_close(struct net_device *d)
 
 static int hostess_ioctl(struct net_device *d, struct ifreq *ifr, int cmd)
 {
-	/* struct sv11_device *sv11=d->priv;
+	/* struct sv11_device *sv11=d->ml_priv;
 	   z8530_ioctl(d,&sv11->sync.chanA,ifr,cmd) */
 	return sppp_do_ioctl(d, ifr,cmd);
 }
 
 static struct net_device_stats *hostess_get_stats(struct net_device *d)
 {
-	struct sv11_device *sv11=d->priv;
+	struct sv11_device *sv11=d->ml_priv;
 	if(sv11)
 		return z8530_get_stats(&sv11->sync.chanA);
 	else
@@ -179,7 +179,7 @@ static struct net_device_stats *hostess_get_stats(struct net_device *d)
 
 static int hostess_queue_xmit(struct sk_buff *skb, struct net_device *d)
 {
-	struct sv11_device *sv11=d->priv;
+	struct sv11_device *sv11=d->ml_priv;
 	return z8530_queue_xmit(&sv11->sync.chanA, skb);
 }
 
@@ -325,6 +325,7 @@ static struct sv11_device *sv11_init(int iobase, int irq)
 	/*
 	 *	Initialise the PPP components
 	 */
+	d->ml_priv = sv;
 	sppp_attach(&sv->netdev);
 
 	/*
@@ -333,7 +334,6 @@ static struct sv11_device *sv11_init(int iobase, int irq)
 
 	d->base_addr = iobase;
 	d->irq = irq;
-	d->priv = sv;
 
 	if(register_netdev(d))
 	{
diff --git a/drivers/net/wan/lapbether.c b/drivers/net/wan/lapbether.c
index b5860b97a93e..24fd613466b7 100644
--- a/drivers/net/wan/lapbether.c
+++ b/drivers/net/wan/lapbether.c
@@ -459,6 +459,7 @@ static void __exit lapbeth_cleanup_driver(void)
 	list_for_each_safe(entry, tmp, &lapbeth_devices) {
 		lapbeth = list_entry(entry, struct lapbethdev, node);
 
+		dev_put(lapbeth->ethdev);
 		unregister_netdevice(lapbeth->axdev);
 	}
 	rtnl_unlock();
diff --git a/drivers/net/wan/lmc/lmc_main.c b/drivers/net/wan/lmc/lmc_main.c
index 6635ecef36e5..62133cee446a 100644
--- a/drivers/net/wan/lmc/lmc_main.c
+++ b/drivers/net/wan/lmc/lmc_main.c
@@ -891,6 +891,7 @@ static int __devinit lmc_init_one(struct pci_dev *pdev,
 
     /* Initialize the sppp layer */
     /* An ioctl can cause a subsequent detach for raw frame interface */
+    dev->ml_priv = sc;
     sc->if_type = LMC_PPP;
     sc->check = 0xBEAFCAFE;
     dev->base_addr = pci_resource_start(pdev, 0);
diff --git a/drivers/net/wan/sealevel.c b/drivers/net/wan/sealevel.c
index 11276bf3149f..44a89df1b8bf 100644
--- a/drivers/net/wan/sealevel.c
+++ b/drivers/net/wan/sealevel.c
@@ -241,6 +241,7 @@ static inline struct slvl_device *slvl_alloc(int iobase, int irq)
 		return NULL;
 
 	sv = d->priv;
+	d->ml_priv = sv;
 	sv->if_ptr = &sv->pppdev;
 	sv->pppdev.dev = d;
 	d->base_addr = iobase;
diff --git a/drivers/net/wireless/iwlwifi/Kconfig b/drivers/net/wireless/iwlwifi/Kconfig
index cc0006900b30..995dd537083f 100644
--- a/drivers/net/wireless/iwlwifi/Kconfig
+++ b/drivers/net/wireless/iwlwifi/Kconfig
@@ -1,6 +1,5 @@
 config IWLWIFI
-	bool
-	default n
+	tristate
 
 config IWLCORE
 	tristate "Intel Wireless Wifi Core"
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945.c b/drivers/net/wireless/iwlwifi/iwl-3945.c
index 8464397f7816..9df48b33f0ca 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945.c
+++ b/drivers/net/wireless/iwlwifi/iwl-3945.c
@@ -666,7 +666,7 @@ static void iwl3945_rx_reply_rx(struct iwl3945_priv *priv,
 	rx_status.flag = 0;
 	rx_status.mactime = le64_to_cpu(rx_end->timestamp);
 	rx_status.freq =
-		ieee80211_frequency_to_channel(le16_to_cpu(rx_hdr->channel));
+		ieee80211_channel_to_frequency(le16_to_cpu(rx_hdr->channel));
 	rx_status.band = (rx_hdr->phy_flags & RX_RES_PHY_FLAGS_BAND_24_MSK) ?
 				IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;
 
diff --git a/drivers/net/wireless/iwlwifi/iwl-4965-rs.c b/drivers/net/wireless/iwlwifi/iwl-4965-rs.c
index 67356d9a0c59..24dee00b0e85 100644
--- a/drivers/net/wireless/iwlwifi/iwl-4965-rs.c
+++ b/drivers/net/wireless/iwlwifi/iwl-4965-rs.c
@@ -168,8 +168,8 @@ struct iwl4965_lq_sta {
 	struct dentry *rs_sta_dbgfs_tx_agg_tid_en_file;
 #endif
 	u32 dbg_fixed_rate;
-	struct iwl_priv *drv;
 #endif
+	struct iwl_priv *drv;
 };
 
 static void rs_rate_scale_perform(struct iwl_priv *priv,
diff --git a/drivers/net/wireless/iwlwifi/iwl-4965.c b/drivers/net/wireless/iwlwifi/iwl-4965.c
index 9546582e983f..5d675e39bab8 100644
--- a/drivers/net/wireless/iwlwifi/iwl-4965.c
+++ b/drivers/net/wireless/iwlwifi/iwl-4965.c
@@ -3128,7 +3128,7 @@ static void iwl4965_rx_reply_rx(struct iwl_priv *priv,
 
 	rx_status.mactime = le64_to_cpu(rx_start->timestamp);
 	rx_status.freq =
-		ieee80211_frequency_to_channel(le16_to_cpu(rx_start->channel));
+		ieee80211_channel_to_frequency(le16_to_cpu(rx_start->channel));
 	rx_status.band = (rx_start->phy_flags & RX_RES_PHY_FLAGS_BAND_24_MSK) ?
 				IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;
 	rx_status.rate_idx =
diff --git a/drivers/net/wireless/prism54/islpci_dev.c b/drivers/net/wireless/prism54/islpci_dev.c
index 04c2638d75ad..9196825ed1b5 100644
--- a/drivers/net/wireless/prism54/islpci_dev.c
+++ b/drivers/net/wireless/prism54/islpci_dev.c
@@ -388,8 +388,15 @@ islpci_open(struct net_device *ndev)
 
 	netif_start_queue(ndev);
 
-	/* Turn off carrier unless we know we have associated */
-	netif_carrier_off(ndev);
+	/* Turn off carrier if in STA or Ad-hoc mode. It will be turned on
+	 * once the firmware receives a trap of being associated
+	 * (GEN_OID_LINKSTATE). In other modes (AP or WDS or monitor) we
+	 * should just leave the carrier on as its expected the firmware
+	 * won't send us a trigger. */
+	if (priv->iw_mode == IW_MODE_INFRA || priv->iw_mode == IW_MODE_ADHOC)
+		netif_carrier_off(ndev);
+	else
+		netif_carrier_on(ndev);
 
 	return 0;
 }
diff --git a/drivers/net/wireless/rt2x00/rt2x00dev.c b/drivers/net/wireless/rt2x00/rt2x00dev.c
index 9929b15e28ba..61510c519351 100644
--- a/drivers/net/wireless/rt2x00/rt2x00dev.c
+++ b/drivers/net/wireless/rt2x00/rt2x00dev.c
@@ -1028,8 +1028,10 @@ static int rt2x00lib_initialize(struct rt2x00_dev *rt2x00dev)
 	 * Initialize the device.
 	 */
 	status = rt2x00dev->ops->lib->initialize(rt2x00dev);
-	if (status)
-		goto exit;
+	if (status) {
+		rt2x00queue_uninitialize(rt2x00dev);
+		return status;
+	}
 
 	__set_bit(DEVICE_INITIALIZED, &rt2x00dev->flags);
 
@@ -1039,11 +1041,6 @@ static int rt2x00lib_initialize(struct rt2x00_dev *rt2x00dev)
 	rt2x00rfkill_register(rt2x00dev);
 
 	return 0;
-
-exit:
-	rt2x00lib_uninitialize(rt2x00dev);
-
-	return status;
 }
 
 int rt2x00lib_start(struct rt2x00_dev *rt2x00dev)
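
Both rt2x00 hunks tighten error unwinding: instead of a catch-all exit label that tears down more than was ever set up (rt2x00lib_uninitialize on a device that never reached DEVICE_INITIALIZED), each failure now rolls back exactly the steps that had succeeded, in reverse order. A generic sketch of that rule, with hypothetical foo helpers:

	struct foo_dev;
	int foo_alloc_queues(struct foo_dev *dev);
	int foo_init_hw(struct foo_dev *dev);
	void foo_free_queues(struct foo_dev *dev);

	static int foo_init(struct foo_dev *dev)
	{
		int err;

		err = foo_alloc_queues(dev);
		if (err)
			return err;		/* nothing to undo yet */

		err = foo_init_hw(dev);
		if (err)
			goto err_free_queues;	/* undo exactly one step */

		return 0;

	err_free_queues:
		foo_free_queues(dev);
		return err;
	}
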
diff --git a/drivers/net/wireless/rt2x00/rt2x00pci.c b/drivers/net/wireless/rt2x00/rt2x00pci.c
index 9e5d94e44c5c..c17078eac197 100644
--- a/drivers/net/wireless/rt2x00/rt2x00pci.c
+++ b/drivers/net/wireless/rt2x00/rt2x00pci.c
@@ -314,13 +314,14 @@ int rt2x00pci_initialize(struct rt2x00_dev *rt2x00dev)
 	if (status) {
 		ERROR(rt2x00dev, "IRQ %d allocation failed (error %d).\n",
 		      pci_dev->irq, status);
-		return status;
+		goto exit;
 	}
 
 	return 0;
 
 exit:
-	rt2x00pci_uninitialize(rt2x00dev);
+	queue_for_each(rt2x00dev, queue)
+		rt2x00pci_free_queue_dma(rt2x00dev, queue);
 
 	return status;
 }
diff --git a/drivers/net/wireless/rt2x00/rt61pci.c b/drivers/net/wireless/rt2x00/rt61pci.c
index 98af4d26583d..b64f2f5d0d53 100644
--- a/drivers/net/wireless/rt2x00/rt61pci.c
+++ b/drivers/net/wireless/rt2x00/rt61pci.c
@@ -2362,6 +2362,7 @@ static int rt61pci_beacon_update(struct ieee80211_hw *hw, struct sk_buff *skb,
 {
 	struct rt2x00_dev *rt2x00dev = hw->priv;
 	struct rt2x00_intf *intf = vif_to_intf(control->vif);
+	struct queue_entry_priv_pci_tx *priv_tx;
 	struct skb_frame_desc *skbdesc;
 	unsigned int beacon_base;
 	u32 reg;
@@ -2369,21 +2370,8 @@ static int rt61pci_beacon_update(struct ieee80211_hw *hw, struct sk_buff *skb,
 	if (unlikely(!intf->beacon))
 		return -ENOBUFS;
 
-	/*
-	 * We need to append the descriptor in front of the
-	 * beacon frame.
-	 */
-	if (skb_headroom(skb) < intf->beacon->queue->desc_size) {
-		if (pskb_expand_head(skb, intf->beacon->queue->desc_size,
-				     0, GFP_ATOMIC))
-			return -ENOMEM;
-	}
-
-	/*
-	 * Add the descriptor in front of the skb.
-	 */
-	skb_push(skb, intf->beacon->queue->desc_size);
-	memset(skb->data, 0, intf->beacon->queue->desc_size);
+	priv_tx = intf->beacon->priv_data;
+	memset(priv_tx->desc, 0, intf->beacon->queue->desc_size);
 
 	/*
 	 * Fill in skb descriptor
@@ -2391,9 +2379,9 @@ static int rt61pci_beacon_update(struct ieee80211_hw *hw, struct sk_buff *skb,
 	skbdesc = get_skb_frame_desc(skb);
 	memset(skbdesc, 0, sizeof(*skbdesc));
 	skbdesc->flags |= FRAME_DESC_DRIVER_GENERATED;
-	skbdesc->data = skb->data + intf->beacon->queue->desc_size;
-	skbdesc->data_len = skb->len - intf->beacon->queue->desc_size;
-	skbdesc->desc = skb->data;
+	skbdesc->data = skb->data;
+	skbdesc->data_len = skb->len;
+	skbdesc->desc = priv_tx->desc;
 	skbdesc->desc_len = intf->beacon->queue->desc_size;
 	skbdesc->entry = intf->beacon;
 
@@ -2414,7 +2402,10 @@ static int rt61pci_beacon_update(struct ieee80211_hw *hw, struct sk_buff *skb,
 	rt2x00lib_write_tx_desc(rt2x00dev, skb, control);
 	beacon_base = HW_BEACON_OFFSET(intf->beacon->entry_idx);
 	rt2x00pci_register_multiwrite(rt2x00dev, beacon_base,
-				      skb->data, skb->len);
+				      skbdesc->desc, skbdesc->desc_len);
+	rt2x00pci_register_multiwrite(rt2x00dev,
+				      beacon_base + skbdesc->desc_len,
+				      skbdesc->data, skbdesc->data_len);
 	rt61pci_kick_tx_queue(rt2x00dev, QID_BEACON);
 
 	return 0;
@@ -2479,7 +2470,7 @@ static const struct data_queue_desc rt61pci_queue_tx = {
 
 static const struct data_queue_desc rt61pci_queue_bcn = {
 	.entry_num		= 4 * BEACON_ENTRIES,
-	.data_size		= MGMT_FRAME_SIZE,
+	.data_size		= 0, /* No DMA required for beacons */
 	.desc_size		= TXINFO_SIZE,
 	.priv_size		= sizeof(struct queue_entry_priv_pci_tx),
 };
diff --git a/drivers/net/wireless/strip.c b/drivers/net/wireless/strip.c
index 5dd23c93497d..883af891ebfb 100644
--- a/drivers/net/wireless/strip.c
+++ b/drivers/net/wireless/strip.c
@@ -2611,7 +2611,7 @@ static int strip_open(struct tty_struct *tty)
 	 * We need a write method.
 	 */
 
-	if (tty->ops->write == NULL)
+	if (tty->ops->write == NULL || tty->ops->set_termios == NULL)
 		return -EOPNOTSUPP;
 
 	/*
diff --git a/drivers/net/wireless/wavelan.c b/drivers/net/wireless/wavelan.c
index 03384a43186b..49ae97003952 100644
--- a/drivers/net/wireless/wavelan.c
+++ b/drivers/net/wireless/wavelan.c
@@ -908,9 +908,9 @@ static void wv_psa_show(psa_t * p)
 	       p->psa_call_code[3], p->psa_call_code[4], p->psa_call_code[5],
 	       p->psa_call_code[6], p->psa_call_code[7]);
 #ifdef DEBUG_SHOW_UNUSED
-	printk(KERN_DEBUG "psa_reserved[]: %02X:%02X:%02X:%02X\n",
+	printk(KERN_DEBUG "psa_reserved[]: %02X:%02X\n",
 	       p->psa_reserved[0],
-	       p->psa_reserved[1], p->psa_reserved[2], p->psa_reserved[3]);
+	       p->psa_reserved[1]);
 #endif	/* DEBUG_SHOW_UNUSED */
 	printk(KERN_DEBUG "psa_conf_status: %d, ", p->psa_conf_status);
 	printk("psa_crc: 0x%02x%02x, ", p->psa_crc[0], p->psa_crc[1]);
diff --git a/drivers/net/wireless/wavelan_cs.c b/drivers/net/wireless/wavelan_cs.c
index baf74015751c..b584c0ecc62d 100644
--- a/drivers/net/wireless/wavelan_cs.c
+++ b/drivers/net/wireless/wavelan_cs.c
@@ -1074,11 +1074,9 @@ wv_psa_show(psa_t * p)
 	       p->psa_call_code[6],
 	       p->psa_call_code[7]);
 #ifdef DEBUG_SHOW_UNUSED
-	printk(KERN_DEBUG "psa_reserved[]: %02X:%02X:%02X:%02X\n",
+	printk(KERN_DEBUG "psa_reserved[]: %02X:%02X\n",
 	       p->psa_reserved[0],
-	       p->psa_reserved[1],
-	       p->psa_reserved[2],
-	       p->psa_reserved[3]);
+	       p->psa_reserved[1]);
 #endif	/* DEBUG_SHOW_UNUSED */
 	printk(KERN_DEBUG "psa_conf_status: %d, ", p->psa_conf_status);
 	printk("psa_crc: 0x%02x%02x, ", p->psa_crc[0], p->psa_crc[1]);
diff --git a/drivers/net/wireless/zd1211rw/zd_usb.c b/drivers/net/wireless/zd1211rw/zd_usb.c
index 5316074f39f0..12e24f04dddf 100644
--- a/drivers/net/wireless/zd1211rw/zd_usb.c
+++ b/drivers/net/wireless/zd1211rw/zd_usb.c
@@ -889,9 +889,13 @@ static void tx_urb_complete(struct urb *urb)
 	}
 free_urb:
 	skb = (struct sk_buff *)urb->context;
-	zd_mac_tx_to_dev(skb, urb->status);
+	/*
+	 * grab 'usb' pointer before handing off the skb (since
+	 * it might be freed by zd_mac_tx_to_dev or mac80211)
+	 */
 	cb = (struct zd_tx_skb_control_block *)skb->cb;
 	usb = &zd_hw_mac(cb->hw)->chip.usb;
+	zd_mac_tx_to_dev(skb, urb->status);
 	free_tx_urb(usb, urb);
 	tx_dec_submitted_urbs(usb);
 	return;
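
The zd1211rw hunk above closes a use-after-free window: zd_mac_tx_to_dev() hands the skb to mac80211, which may free it, so anything still needed from skb->cb (here the usb pointer) must be read out first. The general rule, sketched with hypothetical foo names:

	#include <linux/skbuff.h>
	#include <linux/usb.h>

	struct foo_usb;
	struct foo_cb {
		struct foo_usb *usb;
	};

	void foo_tx_to_stack(struct sk_buff *skb, int status);
	void foo_free_tx_urb(struct foo_usb *usb, struct urb *urb);

	static void foo_tx_urb_complete(struct urb *urb)
	{
		struct sk_buff *skb = urb->context;
		struct foo_cb *cb = (struct foo_cb *)skb->cb;
		struct foo_usb *usb = cb->usb;	/* save while skb is still ours */

		foo_tx_to_stack(skb, urb->status);	/* skb may be freed here */
		foo_free_tx_urb(usb, urb);		/* safe: uses saved pointer */
	}
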