Diffstat (limited to 'drivers/net')
-rw-r--r--  drivers/net/8139cp.c              46
-rw-r--r--  drivers/net/Kconfig                2
-rw-r--r--  drivers/net/bonding/bond_main.c   11
-rw-r--r--  drivers/net/e100.c                 4
-rw-r--r--  drivers/net/e1000/e1000_main.c     1
-rw-r--r--  drivers/net/ixgb/ixgb_main.c       2
-rw-r--r--  drivers/net/s2io.c                 9
-rw-r--r--  drivers/net/sk98lin/skge.c        12
-rw-r--r--  drivers/net/skge.c                98
-rw-r--r--  drivers/net/skge.h                 2
-rw-r--r--  drivers/net/spider_net.c           4
-rw-r--r--  drivers/net/tg3.c                108
-rw-r--r--  drivers/net/tulip/xircom_cb.c      2
-rw-r--r--  drivers/net/wan/hdlc_cisco.c       2
-rw-r--r--  drivers/net/wireless/airo.c        5
15 files changed, 158 insertions, 150 deletions
diff --git a/drivers/net/8139cp.c b/drivers/net/8139cp.c
index bd99c268e2da..f822cd3025ff 100644
--- a/drivers/net/8139cp.c
+++ b/drivers/net/8139cp.c
@@ -353,8 +353,6 @@ struct cp_private {
 
 	struct net_device_stats net_stats;
 	struct cp_extra_stats	cp_stats;
-	struct cp_dma_stats	*nic_stats;
-	dma_addr_t		nic_stats_dma;
 
 	unsigned		rx_tail		____cacheline_aligned;
 	struct cp_desc		*rx_ring;
@@ -1142,10 +1140,6 @@ static int cp_alloc_rings (struct cp_private *cp)
 	cp->rx_ring = mem;
 	cp->tx_ring = &cp->rx_ring[CP_RX_RING_SIZE];
 
-	mem += (CP_RING_BYTES - CP_STATS_SIZE);
-	cp->nic_stats = mem;
-	cp->nic_stats_dma = cp->ring_dma + (CP_RING_BYTES - CP_STATS_SIZE);
-
 	return cp_init_rings(cp);
 }
 
@@ -1186,7 +1180,6 @@ static void cp_free_rings (struct cp_private *cp)
 	pci_free_consistent(cp->pdev, CP_RING_BYTES, cp->rx_ring, cp->ring_dma);
 	cp->rx_ring = NULL;
 	cp->tx_ring = NULL;
-	cp->nic_stats = NULL;
 }
 
 static int cp_open (struct net_device *dev)
@@ -1515,13 +1508,17 @@ static void cp_get_ethtool_stats (struct net_device *dev,
 				  struct ethtool_stats *estats, u64 *tmp_stats)
 {
 	struct cp_private *cp = netdev_priv(dev);
+	struct cp_dma_stats *nic_stats;
+	dma_addr_t dma;
 	int i;
 
-	memset(cp->nic_stats, 0, sizeof(struct cp_dma_stats));
+	nic_stats = pci_alloc_consistent(cp->pdev, sizeof(*nic_stats), &dma);
+	if (!nic_stats)
+		return;
 
 	/* begin NIC statistics dump */
-	cpw32(StatsAddr + 4, (cp->nic_stats_dma >> 16) >> 16);
-	cpw32(StatsAddr, (cp->nic_stats_dma & 0xffffffff) | DumpStats);
+	cpw32(StatsAddr + 4, (u64)dma >> 32);
+	cpw32(StatsAddr, ((u64)dma & DMA_32BIT_MASK) | DumpStats);
 	cpr32(StatsAddr);
 
 	for (i = 0; i < 1000; i++) {
@@ -1531,24 +1528,27 @@ static void cp_get_ethtool_stats (struct net_device *dev,
 	}
 	cpw32(StatsAddr, 0);
 	cpw32(StatsAddr + 4, 0);
+	cpr32(StatsAddr);
 
 	i = 0;
-	tmp_stats[i++] = le64_to_cpu(cp->nic_stats->tx_ok);
-	tmp_stats[i++] = le64_to_cpu(cp->nic_stats->rx_ok);
-	tmp_stats[i++] = le64_to_cpu(cp->nic_stats->tx_err);
-	tmp_stats[i++] = le32_to_cpu(cp->nic_stats->rx_err);
-	tmp_stats[i++] = le16_to_cpu(cp->nic_stats->rx_fifo);
-	tmp_stats[i++] = le16_to_cpu(cp->nic_stats->frame_align);
-	tmp_stats[i++] = le32_to_cpu(cp->nic_stats->tx_ok_1col);
-	tmp_stats[i++] = le32_to_cpu(cp->nic_stats->tx_ok_mcol);
-	tmp_stats[i++] = le64_to_cpu(cp->nic_stats->rx_ok_phys);
-	tmp_stats[i++] = le64_to_cpu(cp->nic_stats->rx_ok_bcast);
-	tmp_stats[i++] = le32_to_cpu(cp->nic_stats->rx_ok_mcast);
-	tmp_stats[i++] = le16_to_cpu(cp->nic_stats->tx_abort);
-	tmp_stats[i++] = le16_to_cpu(cp->nic_stats->tx_underrun);
+	tmp_stats[i++] = le64_to_cpu(nic_stats->tx_ok);
+	tmp_stats[i++] = le64_to_cpu(nic_stats->rx_ok);
+	tmp_stats[i++] = le64_to_cpu(nic_stats->tx_err);
+	tmp_stats[i++] = le32_to_cpu(nic_stats->rx_err);
+	tmp_stats[i++] = le16_to_cpu(nic_stats->rx_fifo);
+	tmp_stats[i++] = le16_to_cpu(nic_stats->frame_align);
+	tmp_stats[i++] = le32_to_cpu(nic_stats->tx_ok_1col);
+	tmp_stats[i++] = le32_to_cpu(nic_stats->tx_ok_mcol);
+	tmp_stats[i++] = le64_to_cpu(nic_stats->rx_ok_phys);
+	tmp_stats[i++] = le64_to_cpu(nic_stats->rx_ok_bcast);
+	tmp_stats[i++] = le32_to_cpu(nic_stats->rx_ok_mcast);
+	tmp_stats[i++] = le16_to_cpu(nic_stats->tx_abort);
+	tmp_stats[i++] = le16_to_cpu(nic_stats->tx_underrun);
 	tmp_stats[i++] = cp->cp_stats.rx_frags;
 	if (i != CP_NUM_STATS)
 		BUG();
+
+	pci_free_consistent(cp->pdev, sizeof(*nic_stats), nic_stats, dma);
 }
 
 static struct ethtool_ops cp_ethtool_ops = {
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 82603e419d76..ff3fccd7513b 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -1951,7 +1951,7 @@ config SKGE
 	---help---
 	  This driver support the Marvell Yukon or SysKonnect SK-98xx/SK-95xx
 	  and related Gigabit Ethernet adapters. It is a new smaller driver
-	  driver with better performance and more complete ethtool support.
+	  with better performance and more complete ethtool support.
 
 	  It does not support the link failover and network management
 	  features that "portable" vendor supplied sk98lin driver does.
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 94c9f68dd16b..90449a0f2a6c 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -2879,6 +2879,7 @@ static void bond_arp_send_all(struct bonding *bond, struct slave *slave)
 		 * This target is not on a VLAN
 		 */
 		if (rt->u.dst.dev == bond->dev) {
+			ip_rt_put(rt);
 			dprintk("basa: rtdev == bond->dev: arp_send\n");
 			bond_arp_send(slave->dev, ARPOP_REQUEST, targets[i],
 				      bond->master_ip, 0);
@@ -2898,6 +2899,7 @@ static void bond_arp_send_all(struct bonding *bond, struct slave *slave)
 		}
 
 		if (vlan_id) {
+			ip_rt_put(rt);
 			bond_arp_send(slave->dev, ARPOP_REQUEST, targets[i],
 				      vlan->vlan_ip, vlan_id);
 			continue;
@@ -2909,6 +2911,7 @@ static void bond_arp_send_all(struct bonding *bond, struct slave *slave)
 			       bond->dev->name, NIPQUAD(fl.fl4_dst),
 			       rt->u.dst.dev ? rt->u.dst.dev->name : "NULL");
 		}
+		ip_rt_put(rt);
 	}
 }
 
@@ -5036,6 +5039,14 @@ static int __init bonding_init(void)
 	return 0;
 
 out_err:
+	/*
+	 * rtnl_unlock() will run netdev_run_todo(), putting the
+	 * thus-far-registered bonding devices into a state which
+	 * unregister_netdevice() will accept
+	 */
+	rtnl_unlock();
+	rtnl_lock();
+
 	/* free and unregister all bonds that were successfully added */
 	bond_free_all();
 
diff --git a/drivers/net/e100.c b/drivers/net/e100.c
index 1c918309539f..c15406d46418 100644
--- a/drivers/net/e100.c
+++ b/drivers/net/e100.c
@@ -1387,13 +1387,13 @@ static void e100_update_stats(struct nic *nic)
 	ns->collisions += nic->tx_collisions;
 	ns->tx_errors += le32_to_cpu(s->tx_max_collisions) +
 		le32_to_cpu(s->tx_lost_crs);
-	ns->rx_dropped += le32_to_cpu(s->rx_resource_errors);
 	ns->rx_length_errors += le32_to_cpu(s->rx_short_frame_errors) +
 		nic->rx_over_length_errors;
 	ns->rx_crc_errors += le32_to_cpu(s->rx_crc_errors);
 	ns->rx_frame_errors += le32_to_cpu(s->rx_alignment_errors);
 	ns->rx_over_errors += le32_to_cpu(s->rx_overrun_errors);
 	ns->rx_fifo_errors += le32_to_cpu(s->rx_overrun_errors);
+	ns->rx_missed_errors += le32_to_cpu(s->rx_resource_errors);
 	ns->rx_errors += le32_to_cpu(s->rx_crc_errors) +
 		le32_to_cpu(s->rx_alignment_errors) +
 		le32_to_cpu(s->rx_short_frame_errors) +
@@ -1727,12 +1727,10 @@ static inline int e100_rx_indicate(struct nic *nic, struct rx *rx,
 
 	if(unlikely(!(rfd_status & cb_ok))) {
 		/* Don't indicate if hardware indicates errors */
-		nic->net_stats.rx_dropped++;
 		dev_kfree_skb_any(skb);
 	} else if(actual_size > ETH_DATA_LEN + VLAN_ETH_HLEN) {
 		/* Don't indicate oversized frames */
 		nic->rx_over_length_errors++;
-		nic->net_stats.rx_dropped++;
 		dev_kfree_skb_any(skb);
 	} else {
 		nic->net_stats.rx_packets++;
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
index d02883dcc9b3..c062b0ad8262 100644
--- a/drivers/net/e1000/e1000_main.c
+++ b/drivers/net/e1000/e1000_main.c
@@ -2545,7 +2545,6 @@ e1000_update_stats(struct e1000_adapter *adapter)
 		adapter->stats.crcerrs + adapter->stats.algnerrc +
 		adapter->stats.rlec + adapter->stats.mpc +
 		adapter->stats.cexterr;
-	adapter->net_stats.rx_dropped = adapter->stats.mpc;
 	adapter->net_stats.rx_length_errors = adapter->stats.rlec;
 	adapter->net_stats.rx_crc_errors = adapter->stats.crcerrs;
 	adapter->net_stats.rx_frame_errors = adapter->stats.algnerrc;
diff --git a/drivers/net/ixgb/ixgb_main.c b/drivers/net/ixgb/ixgb_main.c
index 5015eaf4e20a..176680cb153e 100644
--- a/drivers/net/ixgb/ixgb_main.c
+++ b/drivers/net/ixgb/ixgb_main.c
@@ -1617,8 +1617,6 @@ ixgb_update_stats(struct ixgb_adapter *adapter)
 				adapter->stats.icbc +
 				adapter->stats.ecbc + adapter->stats.mpc;
 
-	adapter->net_stats.rx_dropped = adapter->stats.mpc;
-
 	/* see above
 	 * adapter->net_stats.rx_length_errors = adapter->stats.rlec;
 	 */
diff --git a/drivers/net/s2io.c b/drivers/net/s2io.c
index c829e6a2e8a6..dd451e099a4c 100644
--- a/drivers/net/s2io.c
+++ b/drivers/net/s2io.c
@@ -428,7 +428,7 @@ static int init_shared_mem(struct s2io_nic *nic)
 				DBG_PRINT(INIT_DBG,
 					  "%s: Zero DMA address for TxDL. ", dev->name);
 				DBG_PRINT(INIT_DBG,
-					  "Virtual address %llx\n", (u64)tmp_v);
+					  "Virtual address %p\n", tmp_v);
 				tmp_v = pci_alloc_consistent(nic->pdev,
 							     PAGE_SIZE, &tmp_p);
 				if (!tmp_v) {
@@ -657,9 +657,10 @@ static void free_shared_mem(struct s2io_nic *nic)
 					    mac_control->zerodma_virt_addr,
 					    (dma_addr_t)0);
 			DBG_PRINT(INIT_DBG,
-				  "%s: Freeing TxDL with zero DMA addr. ", dev->name);
-			DBG_PRINT(INIT_DBG, "Virtual address %llx\n",
-				  (u64)(mac_control->zerodma_virt_addr));
+				  "%s: Freeing TxDL with zero DMA addr. ",
+				  dev->name);
+			DBG_PRINT(INIT_DBG, "Virtual address %p\n",
+				  mac_control->zerodma_virt_addr);
 		}
 		kfree(mac_control->fifos[i].list_info);
 	}
diff --git a/drivers/net/sk98lin/skge.c b/drivers/net/sk98lin/skge.c
index 6ee4771addf1..2e72d79a143c 100644
--- a/drivers/net/sk98lin/skge.c
+++ b/drivers/net/sk98lin/skge.c
@@ -5216,17 +5216,15 @@ static struct pci_device_id skge_pci_tbl[] = {
 	{ PCI_VENDOR_ID_3COM, 0x80eb, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
 	{ PCI_VENDOR_ID_SYSKONNECT, 0x4300, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
 	{ PCI_VENDOR_ID_SYSKONNECT, 0x4320, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
-	{ PCI_VENDOR_ID_DLINK, 0x4c00, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+/* DLink card does not have valid VPD so this driver gags
+ *	{ PCI_VENDOR_ID_DLINK, 0x4c00, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+ */
 	{ PCI_VENDOR_ID_MARVELL, 0x4320, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
-#if 0	/* don't handle Yukon2 cards at the moment -- mlindner@syskonnect.de */
-	{ PCI_VENDOR_ID_MARVELL, 0x4360, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
-	{ PCI_VENDOR_ID_MARVELL, 0x4361, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
-#endif
 	{ PCI_VENDOR_ID_MARVELL, 0x5005, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
 	{ PCI_VENDOR_ID_CNET, 0x434e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
-	{ PCI_VENDOR_ID_LINKSYS, 0x1032, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+	{ PCI_VENDOR_ID_LINKSYS, 0x1032, PCI_ANY_ID, 0x0015, },
 	{ PCI_VENDOR_ID_LINKSYS, 0x1064, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
-	{ 0, }
+	{ 0 }
 };
 
 MODULE_DEVICE_TABLE(pci, skge_pci_tbl);
diff --git a/drivers/net/skge.c b/drivers/net/skge.c
index 757c83387a29..7ce734ec6ba8 100644
--- a/drivers/net/skge.c
+++ b/drivers/net/skge.c
@@ -42,7 +42,7 @@
 #include "skge.h"
 
 #define DRV_NAME		"skge"
-#define DRV_VERSION		"0.9"
+#define DRV_VERSION		"1.0"
 #define PFX			DRV_NAME " "
 
 #define DEFAULT_TX_RING_SIZE	128
@@ -669,7 +669,7 @@ static void skge_led(struct skge_port *skge, enum led_mode mode)
 			     PHY_M_LED_BLINK_RT(BLINK_84MS) |
 			     PHY_M_LEDC_TX_CTRL |
 			     PHY_M_LEDC_DP_CTRL);
-		
+
 		gm_phy_write(hw, port, PHY_MARV_LED_OVER,
 			     PHY_M_LED_MO_RX(MO_LED_OFF) |
 			     (skge->speed == SPEED_100 ?
@@ -877,7 +877,7 @@ static int skge_rx_fill(struct skge_port *skge)
 
 static void skge_link_up(struct skge_port *skge)
 {
-	skge_write8(skge->hw, SK_REG(skge->port, LNK_LED_REG), 
+	skge_write8(skge->hw, SK_REG(skge->port, LNK_LED_REG),
 		    LED_BLK_OFF|LED_SYNC_OFF|LED_ON);
 
 	netif_carrier_on(skge->netdev);
@@ -988,6 +988,8 @@ static void genesis_reset(struct skge_hw *hw, int port)
 {
 	const u8 zero[8]  = { 0 };
 
+	skge_write8(hw, SK_REG(port, GMAC_IRQ_MSK), 0);
+
 	/* reset the statistics module */
 	xm_write32(hw, port, XM_GP_PORT, XM_GP_RES_STAT);
 	xm_write16(hw, port, XM_IMSK, 0xffff);	/* disable XMAC IRQs */
@@ -1022,8 +1024,6 @@ static void bcom_check_link(struct skge_hw *hw, int port)
 	(void) xm_phy_read(hw, port, PHY_BCOM_STAT);
 	status = xm_phy_read(hw, port, PHY_BCOM_STAT);
 
-	pr_debug("bcom_check_link status=0x%x\n", status);
-
 	if ((status & PHY_ST_LSYNC) == 0) {
 		u16 cmd = xm_read16(hw, port, XM_MMU_CMD);
 		cmd &= ~(XM_MMU_ENA_RX | XM_MMU_ENA_TX);
@@ -1107,8 +1107,6 @@ static void bcom_phy_init(struct skge_port *skge, int jumbo)
 		{ 0x17, 0x0013 }, { 0x15, 0x0A04 }, { 0x18, 0x0420 },
 	};
 
-	pr_debug("bcom_phy_init\n");
-
 	/* read Id from external PHY (all have the same address) */
 	id1 = xm_phy_read(hw, port, PHY_XMAC_ID1);
 
@@ -1341,6 +1339,8 @@ static void genesis_stop(struct skge_port *skge)
 	int port = skge->port;
 	u32 reg;
 
+	genesis_reset(hw, port);
+
 	/* Clear Tx packet arbiter timeout IRQ */
 	skge_write16(hw, B3_PA_CTRL,
 		     port == 0 ? PA_CLR_TO_TX1 : PA_CLR_TO_TX2);
@@ -1466,7 +1466,6 @@ static void genesis_link_up(struct skge_port *skge)
 	u16 cmd;
 	u32 mode, msk;
 
-	pr_debug("genesis_link_up\n");
 	cmd = xm_read16(hw, port, XM_MMU_CMD);
 
 	/*
@@ -1579,7 +1578,6 @@ static void yukon_init(struct skge_hw *hw, int port)
 	struct skge_port *skge = netdev_priv(hw->dev[port]);
 	u16 ctrl, ct1000, adv;
 
-	pr_debug("yukon_init\n");
 	if (skge->autoneg == AUTONEG_ENABLE) {
 		u16 ectrl = gm_phy_read(hw, port, PHY_MARV_EXT_CTRL);
 
@@ -1678,9 +1676,11 @@ static void yukon_mac_init(struct skge_hw *hw, int port)
 
 	/* WA code for COMA mode -- set PHY reset */
 	if (hw->chip_id == CHIP_ID_YUKON_LITE &&
-	    hw->chip_rev >= CHIP_REV_YU_LITE_A3)
-		skge_write32(hw, B2_GP_IO,
-			     (skge_read32(hw, B2_GP_IO) | GP_DIR_9 | GP_IO_9));
+	    hw->chip_rev >= CHIP_REV_YU_LITE_A3) {
+		reg = skge_read32(hw, B2_GP_IO);
+		reg |= GP_DIR_9 | GP_IO_9;
+		skge_write32(hw, B2_GP_IO, reg);
+	}
 
 	/* hard reset */
 	skge_write32(hw, SK_REG(port, GPHY_CTRL), GPC_RST_SET);
@@ -1688,10 +1688,12 @@ static void yukon_mac_init(struct skge_hw *hw, int port)
 
 	/* WA code for COMA mode -- clear PHY reset */
 	if (hw->chip_id == CHIP_ID_YUKON_LITE &&
-	    hw->chip_rev >= CHIP_REV_YU_LITE_A3)
-		skge_write32(hw, B2_GP_IO,
-			     (skge_read32(hw, B2_GP_IO) | GP_DIR_9)
-			     & ~GP_IO_9);
+	    hw->chip_rev >= CHIP_REV_YU_LITE_A3) {
+		reg = skge_read32(hw, B2_GP_IO);
+		reg |= GP_DIR_9;
+		reg &= ~GP_IO_9;
+		skge_write32(hw, B2_GP_IO, reg);
+	}
 
 	/* Set hardware config mode */
 	reg = GPC_INT_POL_HI | GPC_DIS_FC | GPC_DIS_SLEEP |
@@ -1730,7 +1732,7 @@ static void yukon_mac_init(struct skge_hw *hw, int port)
 	}
 
 	gma_write16(hw, port, GM_GP_CTRL, reg);
-	skge_read16(hw, GMAC_IRQ_SRC);
+	skge_read16(hw, SK_REG(port, GMAC_IRQ_SRC));
 
 	yukon_init(hw, port);
 
@@ -1802,20 +1804,26 @@ static void yukon_stop(struct skge_port *skge)
 	struct skge_hw *hw = skge->hw;
 	int port = skge->port;
 
-	if (hw->chip_id == CHIP_ID_YUKON_LITE &&
-	    hw->chip_rev >= CHIP_REV_YU_LITE_A3) {
-		skge_write32(hw, B2_GP_IO,
-			     skge_read32(hw, B2_GP_IO) | GP_DIR_9 | GP_IO_9);
-	}
+	skge_write8(hw, SK_REG(port, GMAC_IRQ_MSK), 0);
+	yukon_reset(hw, port);
 
 	gma_write16(hw, port, GM_GP_CTRL,
 			 gma_read16(hw, port, GM_GP_CTRL)
 			 & ~(GM_GPCR_TX_ENA|GM_GPCR_RX_ENA));
 	gma_read16(hw, port, GM_GP_CTRL);
 
+	if (hw->chip_id == CHIP_ID_YUKON_LITE &&
+	    hw->chip_rev >= CHIP_REV_YU_LITE_A3) {
+		u32 io = skge_read32(hw, B2_GP_IO);
+
+		io |= GP_DIR_9 | GP_IO_9;
+		skge_write32(hw, B2_GP_IO, io);
+		skge_read32(hw, B2_GP_IO);
+	}
+
 	/* set GPHY Control reset */
-	skge_write32(hw, SK_REG(port, GPHY_CTRL), GPC_RST_SET);
-	skge_write32(hw, SK_REG(port, GMAC_CTRL), GMC_RST_SET);
+	skge_write8(hw, SK_REG(port, GPHY_CTRL), GPC_RST_SET);
+	skge_write8(hw, SK_REG(port, GMAC_CTRL), GMC_RST_SET);
 }
 
 static void yukon_get_stats(struct skge_port *skge, u64 *data)
@@ -1874,10 +1882,8 @@ static void yukon_link_up(struct skge_port *skge)
 	int port = skge->port;
 	u16 reg;
 
-	pr_debug("yukon_link_up\n");
-
 	/* Enable Transmit FIFO Underrun */
-	skge_write8(hw, GMAC_IRQ_MSK, GMAC_DEF_MSK);
+	skge_write8(hw, SK_REG(port, GMAC_IRQ_MSK), GMAC_DEF_MSK);
 
 	reg = gma_read16(hw, port, GM_GP_CTRL);
 	if (skge->duplex == DUPLEX_FULL || skge->autoneg == AUTONEG_ENABLE)
@@ -1897,7 +1903,6 @@ static void yukon_link_down(struct skge_port *skge)
 	int port = skge->port;
 	u16 ctrl;
 
-	pr_debug("yukon_link_down\n");
 	gm_phy_write(hw, port, PHY_MARV_INT_MASK, 0);
 
 	ctrl = gma_read16(hw, port, GM_GP_CTRL);
@@ -2113,7 +2118,6 @@ static int skge_up(struct net_device *dev)
 	skge_write8(hw, Q_ADDR(rxqaddr[port], Q_CSR), CSR_START | CSR_IRQ_CL_F);
 	skge_led(skge, LED_MODE_ON);
 
-	pr_debug("skge_up completed\n");
 	return 0;
 
  free_rx_ring:
@@ -2136,15 +2140,20 @@ static int skge_down(struct net_device *dev)
 
 	netif_stop_queue(dev);
 
+	skge_write8(skge->hw, SK_REG(skge->port, LNK_LED_REG), LED_OFF);
+	if (hw->chip_id == CHIP_ID_GENESIS)
+		genesis_stop(skge);
+	else
+		yukon_stop(skge);
+
+	hw->intr_mask &= ~portirqmask[skge->port];
+	skge_write32(hw, B0_IMSK, hw->intr_mask);
+
 	/* Stop transmitter */
 	skge_write8(hw, Q_ADDR(txqaddr[port], Q_CSR), CSR_STOP);
 	skge_write32(hw, RB_ADDR(txqaddr[port], RB_CTRL),
 		     RB_RST_SET|RB_DIS_OP_MD);
 
-	if (hw->chip_id == CHIP_ID_GENESIS)
-		genesis_stop(skge);
-	else
-		yukon_stop(skge);
 
 	/* Disable Force Sync bit and Enable Alloc bit */
 	skge_write8(hw, SK_REG(port, TXA_CTRL),
@@ -2368,8 +2377,6 @@ static void genesis_set_multicast(struct net_device *dev)
 	u32 mode;
 	u8 filter[8];
 
-	pr_debug("genesis_set_multicast flags=%x count=%d\n", dev->flags, dev->mc_count);
-
 	mode = xm_read32(hw, port, XM_MODE);
 	mode |= XM_MD_ENA_HASH;
 	if (dev->flags & IFF_PROMISC)
@@ -2531,8 +2538,6 @@ static int skge_poll(struct net_device *dev, int *budget)
 	unsigned int to_do = min(dev->quota, *budget);
 	unsigned int work_done = 0;
 
-	pr_debug("skge_poll\n");
-
 	for (e = ring->to_clean; work_done < to_do; e = e->next) {
 		struct skge_rx_desc *rd = e->desc;
 		struct sk_buff *skb;
@@ -2673,9 +2678,9 @@ static void skge_error_irq(struct skge_hw *hw)
 	if (hw->chip_id == CHIP_ID_GENESIS) {
 		/* clear xmac errors */
 		if (hwstatus & (IS_NO_STAT_M1|IS_NO_TIST_M1))
-			skge_write16(hw, SK_REG(0, RX_MFF_CTRL1), MFF_CLR_INSTAT);
+			skge_write16(hw, RX_MFF_CTRL1, MFF_CLR_INSTAT);
 		if (hwstatus & (IS_NO_STAT_M2|IS_NO_TIST_M2))
-			skge_write16(hw, SK_REG(0, RX_MFF_CTRL2), MFF_CLR_INSTAT);
+			skge_write16(hw, RX_MFF_CTRL2, MFF_CLR_INSTAT);
 	} else {
 		/* Timestamp (unused) overflow */
 		if (hwstatus & IS_IRQ_TIST_OV)
@@ -3001,9 +3006,6 @@ static int skge_reset(struct skge_hw *hw)
 
 	skge_write32(hw, B0_IMSK, hw->intr_mask);
 
-	if (hw->chip_id != CHIP_ID_GENESIS)
-		skge_write8(hw, GMAC_IRQ_MSK, 0);
-
 	spin_lock_bh(&hw->phy_lock);
 	for (i = 0; i < hw->ports; i++) {
 		if (hw->chip_id == CHIP_ID_GENESIS)
@@ -3232,6 +3234,11 @@ static void __devexit skge_remove(struct pci_dev *pdev)
 	dev0 = hw->dev[0];
 	unregister_netdev(dev0);
 
+	skge_write32(hw, B0_IMSK, 0);
+	skge_write16(hw, B0_LED, LED_STAT_OFF);
+	skge_pci_clear(hw);
+	skge_write8(hw, B0_CTST, CS_RST_SET);
+
 	tasklet_kill(&hw->ext_tasklet);
 
 	free_irq(pdev->irq, hw);
@@ -3240,7 +3247,7 @@ static void __devexit skge_remove(struct pci_dev *pdev)
 	if (dev1)
 		free_netdev(dev1);
 	free_netdev(dev0);
-	skge_write16(hw, B0_LED, LED_STAT_OFF);
+
 	iounmap(hw->regs);
 	kfree(hw);
 	pci_set_drvdata(pdev, NULL);
@@ -3259,7 +3266,10 @@ static int skge_suspend(struct pci_dev *pdev, pm_message_t state)
 		struct skge_port *skge = netdev_priv(dev);
 		if (netif_running(dev)) {
 			netif_carrier_off(dev);
-			skge_down(dev);
+			if (skge->wol)
+				netif_stop_queue(dev);
+			else
+				skge_down(dev);
 		}
 		netif_device_detach(dev);
 		wol |= skge->wol;
diff --git a/drivers/net/skge.h b/drivers/net/skge.h
index f1680beb8e68..efbf98c675d2 100644
--- a/drivers/net/skge.h
+++ b/drivers/net/skge.h
@@ -2008,7 +2008,7 @@ enum {
 	GM_IS_RX_FF_OR	= 1<<1,	/* Receive FIFO Overrun */
 	GM_IS_RX_COMPL	= 1<<0,	/* Frame Reception Complete */
 
-#define GMAC_DEF_MSK	(GM_IS_TX_CO_OV | GM_IS_RX_CO_OV | GM_IS_TX_FF_UR)
+#define GMAC_DEF_MSK	(GM_IS_RX_FF_OR | GM_IS_TX_FF_UR)
 
 /*	GMAC_LINK_CTRL	16 bit	GMAC Link Control Reg (YUKON only) */
 						/* Bits 15.. 2:	reserved */
diff --git a/drivers/net/spider_net.c b/drivers/net/spider_net.c
index 4e19220473d0..c796f41b4a52 100644
--- a/drivers/net/spider_net.c
+++ b/drivers/net/spider_net.c
@@ -1817,6 +1817,10 @@ spider_net_setup_phy(struct spider_net_card *card)
 	/* LEDs active in both modes, autosense prio = fiber */
 	spider_net_write_phy(card->netdev, 1, MII_NCONFIG, 0x945f);
 
+	/* switch off fibre autoneg */
+	spider_net_write_phy(card->netdev, 1, MII_NCONFIG, 0xfc01);
+	spider_net_write_phy(card->netdev, 1, 0x0b, 0x0004);
+
 	phy->def->ops->read_link(phy);
 	pr_info("Found %s with %i Mbps, %s-duplex.\n", phy->def->name,
 		phy->speed, phy->duplex==1 ? "Full" : "Half");
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index 7599f52e15b3..81f4aedf534c 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -67,8 +67,8 @@
 
 #define DRV_MODULE_NAME		"tg3"
 #define PFX DRV_MODULE_NAME	": "
-#define DRV_MODULE_VERSION	"3.39"
-#define DRV_MODULE_RELDATE	"September 5, 2005"
+#define DRV_MODULE_VERSION	"3.40"
+#define DRV_MODULE_RELDATE	"September 15, 2005"
 
 #define TG3_DEF_MAC_MODE	0
 #define TG3_DEF_RX_MODE		0
@@ -3442,31 +3442,47 @@ static void tg3_tx_timeout(struct net_device *dev)
 	schedule_work(&tp->reset_task);
 }
 
+/* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
+static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
+{
+	u32 base = (u32) mapping & 0xffffffff;
+
+	return ((base > 0xffffdcc0) &&
+		(base + len + 8 < base));
+}
+
 static void tg3_set_txd(struct tg3 *, int, dma_addr_t, int, u32, u32);
 
 static int tigon3_4gb_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb,
-				       u32 guilty_entry, int guilty_len,
-				       u32 last_plus_one, u32 *start, u32 mss)
+				       u32 last_plus_one, u32 *start,
+				       u32 base_flags, u32 mss)
 {
 	struct sk_buff *new_skb = skb_copy(skb, GFP_ATOMIC);
-	dma_addr_t new_addr;
+	dma_addr_t new_addr = 0;
 	u32 entry = *start;
-	int i;
+	int i, ret = 0;
 
 	if (!new_skb) {
-		dev_kfree_skb(skb);
-		return -1;
+		ret = -1;
+	} else {
+		/* New SKB is guaranteed to be linear. */
+		entry = *start;
+		new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
+					  PCI_DMA_TODEVICE);
+		/* Make sure new skb does not cross any 4G boundaries.
+		 * Drop the packet if it does.
+		 */
+		if (tg3_4g_overflow_test(new_addr, new_skb->len)) {
+			ret = -1;
+			dev_kfree_skb(new_skb);
+			new_skb = NULL;
+		} else {
+			tg3_set_txd(tp, entry, new_addr, new_skb->len,
+				    base_flags, 1 | (mss << 1));
+			*start = NEXT_TX(entry);
+		}
 	}
 
-	/* New SKB is guaranteed to be linear. */
-	entry = *start;
-	new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
-				  PCI_DMA_TODEVICE);
-	tg3_set_txd(tp, entry, new_addr, new_skb->len,
-		    (skb->ip_summed == CHECKSUM_HW) ?
-		    TXD_FLAG_TCPUDP_CSUM : 0, 1 | (mss << 1));
-	*start = NEXT_TX(entry);
-
 	/* Now clean up the sw ring entries. */
 	i = 0;
 	while (entry != last_plus_one) {
@@ -3491,7 +3507,7 @@ static int tigon3_4gb_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb,
 
 	dev_kfree_skb(skb);
 
-	return 0;
+	return ret;
 }
 
 static void tg3_set_txd(struct tg3 *tp, int entry,
@@ -3517,19 +3533,10 @@ static void tg3_set_txd(struct tg3 *tp, int entry,
 	txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT;
 }
 
-static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
-{
-	u32 base = (u32) mapping & 0xffffffff;
-
-	return ((base > 0xffffdcc0) &&
-		(base + len + 8 < base));
-}
-
 static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
 {
 	struct tg3 *tp = netdev_priv(dev);
 	dma_addr_t mapping;
-	unsigned int i;
 	u32 len, entry, base_flags, mss;
 	int would_hit_hwbug;
 
@@ -3624,7 +3631,7 @@ static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	would_hit_hwbug = 0;
 
 	if (tg3_4g_overflow_test(mapping, len))
-		would_hit_hwbug = entry + 1;
+		would_hit_hwbug = 1;
 
 	tg3_set_txd(tp, entry, mapping, len, base_flags,
 		    (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
@@ -3648,12 +3655,8 @@ static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
 			tp->tx_buffers[entry].skb = NULL;
 			pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
 
-			if (tg3_4g_overflow_test(mapping, len)) {
-				/* Only one should match. */
-				if (would_hit_hwbug)
-					BUG();
-				would_hit_hwbug = entry + 1;
-			}
+			if (tg3_4g_overflow_test(mapping, len))
+				would_hit_hwbug = 1;
 
 			if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
 				tg3_set_txd(tp, entry, mapping, len,
@@ -3669,34 +3672,15 @@ static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	if (would_hit_hwbug) {
 		u32 last_plus_one = entry;
 		u32 start;
-		unsigned int len = 0;
-
-		would_hit_hwbug -= 1;
-		entry = entry - 1 - skb_shinfo(skb)->nr_frags;
-		entry &= (TG3_TX_RING_SIZE - 1);
-		start = entry;
-		i = 0;
-		while (entry != last_plus_one) {
-			if (i == 0)
-				len = skb_headlen(skb);
-			else
-				len = skb_shinfo(skb)->frags[i-1].size;
 
-			if (entry == would_hit_hwbug)
-				break;
-
-			i++;
-			entry = NEXT_TX(entry);
-
-		}
+		start = entry - 1 - skb_shinfo(skb)->nr_frags;
+		start &= (TG3_TX_RING_SIZE - 1);
 
 		/* If the workaround fails due to memory/mapping
 		 * failure, silently drop this packet.
 		 */
-		if (tigon3_4gb_hwbug_workaround(tp, skb,
-						entry, len,
-						last_plus_one,
-						&start, mss))
+		if (tigon3_4gb_hwbug_workaround(tp, skb, last_plus_one,
+						&start, base_flags, mss))
 			goto out_unlock;
 
 		entry = start;
@@ -9271,6 +9255,8 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
 	static struct pci_device_id write_reorder_chipsets[] = {
 		{ PCI_DEVICE(PCI_VENDOR_ID_AMD,
 			     PCI_DEVICE_ID_AMD_FE_GATE_700C) },
+		{ PCI_DEVICE(PCI_VENDOR_ID_AMD,
+			     PCI_DEVICE_ID_AMD_K8_NB) },
 		{ },
 	};
 	u32 misc_ctrl_reg;
@@ -9285,7 +9271,7 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
 		tp->tg3_flags2 |= TG3_FLG2_SUN_570X;
 #endif
 
-	/* If we have an AMD 762 chipset, write
+	/* If we have an AMD 762 or K8 chipset, write
 	 * reordering to the mailbox registers done by the host
 	 * controller can cause major troubles.  We read back from
 	 * every mailbox register write to force the writes to be
@@ -9532,7 +9518,7 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
 		tp->write32_rx_mbox = tg3_write_indirect_mbox;
 
 		iounmap(tp->regs);
-		tp->regs = 0;
+		tp->regs = NULL;
 
 		pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
 		pci_cmd &= ~PCI_COMMAND_MEMORY;
@@ -10680,7 +10666,7 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
 err_out_iounmap:
 	if (tp->regs) {
 		iounmap(tp->regs);
-		tp->regs = 0;
+		tp->regs = NULL;
 	}
 
 err_out_free_dev:
@@ -10705,7 +10691,7 @@ static void __devexit tg3_remove_one(struct pci_dev *pdev)
 	unregister_netdev(dev);
 	if (tp->regs) {
 		iounmap(tp->regs);
-		tp->regs = 0;
+		tp->regs = NULL;
 	}
 	free_netdev(dev);
 	pci_release_regions(pdev);
diff --git a/drivers/net/tulip/xircom_cb.c b/drivers/net/tulip/xircom_cb.c
index 26cc4f6378c7..60d1e05ab732 100644
--- a/drivers/net/tulip/xircom_cb.c
+++ b/drivers/net/tulip/xircom_cb.c
@@ -117,7 +117,7 @@ static int xircom_open(struct net_device *dev);
 static int xircom_close(struct net_device *dev);
 static void xircom_up(struct xircom_private *card);
 static struct net_device_stats *xircom_get_stats(struct net_device *dev);
-#if CONFIG_NET_POLL_CONTROLLER
+#ifdef CONFIG_NET_POLL_CONTROLLER
 static void xircom_poll_controller(struct net_device *dev);
 #endif
 
diff --git a/drivers/net/wan/hdlc_cisco.c b/drivers/net/wan/hdlc_cisco.c
index 48c03c11cd9a..a01efa6d5c62 100644
--- a/drivers/net/wan/hdlc_cisco.c
+++ b/drivers/net/wan/hdlc_cisco.c
@@ -72,7 +72,7 @@ static void cisco_keepalive_send(struct net_device *dev, u32 type,
 	}
 	skb_reserve(skb, 4);
 	cisco_hard_header(skb, dev, CISCO_KEEPALIVE, NULL, NULL, 0);
-	data = (cisco_packet*)skb->data;
+	data = (cisco_packet*)(skb->data + 4);
 
 	data->type = htonl(type);
 	data->par1 = htonl(par1);
diff --git a/drivers/net/wireless/airo.c b/drivers/net/wireless/airo.c
index 2be65d308fbe..06998c2240d9 100644
--- a/drivers/net/wireless/airo.c
+++ b/drivers/net/wireless/airo.c
@@ -6852,7 +6852,10 @@ static inline char *airo_translate_scan(struct net_device *dev,
 		/* Add frequency */
 		iwe.cmd = SIOCGIWFREQ;
 		iwe.u.freq.m = le16_to_cpu(bss->dsChannel);
-		iwe.u.freq.m = frequency_list[iwe.u.freq.m] * 100000;
+		/* iwe.u.freq.m contains the channel (starting at 1), but our
+		 * frequency_list array starts at index 0...
+		 */
+		iwe.u.freq.m = frequency_list[iwe.u.freq.m - 1] * 100000;
 		iwe.u.freq.e = 1;
 		current_ev = iwe_stream_add_event(current_ev, end_buf, &iwe, IW_EV_FREQ_LEN);
 