Diffstat (limited to 'drivers/net')
-rw-r--r--	drivers/net/3c509.c	13
-rw-r--r--	drivers/net/Kconfig	3
-rw-r--r--	drivers/net/chelsio/sge.c	6
-rw-r--r--	drivers/net/de620.c	2
-rw-r--r--	drivers/net/dl2k.c	25
-rw-r--r--	drivers/net/e100.c	5
-rw-r--r--	drivers/net/e1000/e1000_main.c	10
-rw-r--r--	drivers/net/sky2.c	32
-rw-r--r--	drivers/net/tg3.c	54
-rw-r--r--	drivers/net/tg3.h	1
-rw-r--r--	drivers/net/tulip/de2104x.c	26
-rw-r--r--	drivers/net/tun.c	5
12 files changed, 120 insertions, 62 deletions
diff --git a/drivers/net/3c509.c b/drivers/net/3c509.c
index 824e430486c2..830528dce0ca 100644
--- a/drivers/net/3c509.c
+++ b/drivers/net/3c509.c
@@ -1574,6 +1574,7 @@ MODULE_LICENSE("GPL");
 
 static int __init el3_init_module(void)
 {
+	int ret = 0;
 	el3_cards = 0;
 
 	if (debug >= 0)
@@ -1589,14 +1590,16 @@ static int __init el3_init_module(void)
 	}
 
 #ifdef CONFIG_EISA
-	if (eisa_driver_register (&el3_eisa_driver) < 0) {
-		eisa_driver_unregister (&el3_eisa_driver);
-	}
+	ret = eisa_driver_register(&el3_eisa_driver);
 #endif
 #ifdef CONFIG_MCA
-	mca_register_driver(&el3_mca_driver);
+	{
+		int err = mca_register_driver(&el3_mca_driver);
+		if (ret == 0)
+			ret = err;
+	}
 #endif
-	return 0;
+	return ret;
 }
 
 static void __exit el3_cleanup_module(void)
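The 3c509 change above stops ignoring bus registration failures: the EISA result seeds ret, and the MCA result only overwrites it if nothing has failed yet, so el3_init_module() reports the first error it saw. A minimal userspace sketch of that first-error-wins pattern, with register_eisa()/register_mca() as hypothetical stand-ins for the real registration calls:

#include <stdio.h>

/* Hypothetical bus registration helpers: 0 on success, negative errno on failure. */
static int register_eisa(void) { return 0; }
static int register_mca(void)  { return -5; /* say -EIO */ }

static int init_module_sketch(void)
{
	int ret = 0;

	ret = register_eisa();

	{
		int err = register_mca();
		if (ret == 0)		/* keep the first failure, ignore later ones */
			ret = err;
	}
	return ret;
}

int main(void)
{
	printf("init returned %d\n", init_module_sketch());
	return 0;
}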
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index e45a8f959719..aa633fa95e64 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -1087,7 +1087,8 @@ config NE2000
 	  without a specific driver are compatible with NE2000.
 
 	  If you have a PCI NE2000 card however, say N here and Y to "PCI
-	  NE2000 support", above. If you have a NE2000 card and are running on
+	  NE2000 and clone support" under "EISA, VLB, PCI and on board
+	  controllers" below. If you have a NE2000 card and are running on
 	  an MCA system (a bus system used on some IBM PS/2 computers and
 	  laptops), say N here and Y to "NE/2 (ne2000 MCA version) support",
 	  below.
diff --git a/drivers/net/chelsio/sge.c b/drivers/net/chelsio/sge.c
index 2c5b849b7ba4..30ff8ea1a402 100644
--- a/drivers/net/chelsio/sge.c
+++ b/drivers/net/chelsio/sge.c
@@ -1021,7 +1021,7 @@ static void restart_tx_queues(struct sge *sge)
 		if (test_and_clear_bit(nd->if_port,
 				       &sge->stopped_tx_queues) &&
 		    netif_running(nd)) {
-			sge->stats.cmdQ_restarted[3]++;
+			sge->stats.cmdQ_restarted[2]++;
 			netif_wake_queue(nd);
 		}
 	}
@@ -1350,7 +1350,7 @@ static int t1_sge_tx(struct sk_buff *skb, struct adapter *adapter,
 	if (unlikely(credits < count)) {
 		netif_stop_queue(dev);
 		set_bit(dev->if_port, &sge->stopped_tx_queues);
-		sge->stats.cmdQ_full[3]++;
+		sge->stats.cmdQ_full[2]++;
 		spin_unlock(&q->lock);
 		if (!netif_queue_stopped(dev))
 			CH_ERR("%s: Tx ring full while queue awake!\n",
@@ -1358,7 +1358,7 @@ static int t1_sge_tx(struct sk_buff *skb, struct adapter *adapter,
 		return NETDEV_TX_BUSY;
 	}
 	if (unlikely(credits - count < q->stop_thres)) {
-		sge->stats.cmdQ_full[3]++;
+		sge->stats.cmdQ_full[2]++;
 		netif_stop_queue(dev);
 		set_bit(dev->if_port, &sge->stopped_tx_queues);
 	}
diff --git a/drivers/net/de620.c b/drivers/net/de620.c
index 0069f5fa973a..22fc5b869a60 100644
--- a/drivers/net/de620.c
+++ b/drivers/net/de620.c
@@ -1012,7 +1012,7 @@ static int __init read_eeprom(struct net_device *dev)
 #ifdef MODULE
 static struct net_device *de620_dev;
 
-int init_module(void)
+int __init init_module(void)
 {
 	de620_dev = de620_probe(-1);
 	if (IS_ERR(de620_dev))
diff --git a/drivers/net/dl2k.c b/drivers/net/dl2k.c
index 430c628279b3..fb9dae302dcc 100644
--- a/drivers/net/dl2k.c
+++ b/drivers/net/dl2k.c
@@ -50,8 +50,8 @@
 
 */
 #define DRV_NAME	"D-Link DL2000-based linux driver"
-#define DRV_VERSION	"v1.17a"
-#define DRV_RELDATE	"2002/10/04"
+#define DRV_VERSION	"v1.17b"
+#define DRV_RELDATE	"2006/03/10"
 #include "dl2k.h"
 
 static char version[] __devinitdata =
@@ -765,7 +765,7 @@ rio_free_tx (struct net_device *dev, int irq)
 			break;
 		skb = np->tx_skbuff[entry];
 		pci_unmap_single (np->pdev,
-				  np->tx_ring[entry].fraginfo,
+				  np->tx_ring[entry].fraginfo & 0xffffffffffff,
 				  skb->len, PCI_DMA_TODEVICE);
 		if (irq)
 			dev_kfree_skb_irq (skb);
@@ -892,14 +892,16 @@ receive_packet (struct net_device *dev)
 
 		/* Small skbuffs for short packets */
 		if (pkt_len > copy_thresh) {
-			pci_unmap_single (np->pdev, desc->fraginfo,
+			pci_unmap_single (np->pdev,
+					  desc->fraginfo & 0xffffffffffff,
 					  np->rx_buf_sz,
 					  PCI_DMA_FROMDEVICE);
 			skb_put (skb = np->rx_skbuff[entry], pkt_len);
 			np->rx_skbuff[entry] = NULL;
 		} else if ((skb = dev_alloc_skb (pkt_len + 2)) != NULL) {
 			pci_dma_sync_single_for_cpu(np->pdev,
-						    desc->fraginfo,
+						    desc->fraginfo &
+							0xffffffffffff,
 						    np->rx_buf_sz,
 						    PCI_DMA_FROMDEVICE);
 			skb->dev = dev;
@@ -910,7 +912,8 @@ receive_packet (struct net_device *dev)
 					  pkt_len, 0);
 			skb_put (skb, pkt_len);
 			pci_dma_sync_single_for_device(np->pdev,
-						       desc->fraginfo,
+						       desc->fraginfo &
+							   0xffffffffffff,
 						       np->rx_buf_sz,
 						       PCI_DMA_FROMDEVICE);
 		}
@@ -1796,8 +1799,9 @@ rio_close (struct net_device *dev)
 		np->rx_ring[i].fraginfo = 0;
 		skb = np->rx_skbuff[i];
 		if (skb) {
-			pci_unmap_single (np->pdev, np->rx_ring[i].fraginfo,
-					  skb->len, PCI_DMA_FROMDEVICE);
+			pci_unmap_single(np->pdev,
+					 np->rx_ring[i].fraginfo & 0xffffffffffff,
+					 skb->len, PCI_DMA_FROMDEVICE);
 			dev_kfree_skb (skb);
 			np->rx_skbuff[i] = NULL;
 		}
@@ -1805,8 +1809,9 @@ rio_close (struct net_device *dev)
 	for (i = 0; i < TX_RING_SIZE; i++) {
 		skb = np->tx_skbuff[i];
 		if (skb) {
-			pci_unmap_single (np->pdev, np->tx_ring[i].fraginfo,
-					  skb->len, PCI_DMA_TODEVICE);
+			pci_unmap_single(np->pdev,
+					 np->tx_ring[i].fraginfo & 0xffffffffffff,
+					 skb->len, PCI_DMA_TODEVICE);
 			dev_kfree_skb (skb);
 			np->tx_skbuff[i] = NULL;
 		}
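All of the dl2k unmap/sync calls now mask fraginfo with 0xffffffffffff, which suggests the descriptor packs the fragment length into the upper 16 bits and keeps the DMA address in the low 48. A standalone sketch of that split, with the field layout assumed from the mask used above rather than taken from dl2k.h:

#include <stdint.h>
#include <stdio.h>

/* Assumed layout: bits 0-47 = DMA address, bits 48-63 = fragment length. */
#define FRAG_ADDR_MASK 0xffffffffffffULL

int main(void)
{
	uint64_t fraginfo = ((uint64_t)1536 << 48) | 0x12345678ULL;

	uint64_t dma_addr = fraginfo & FRAG_ADDR_MASK;	/* what gets unmapped */
	uint16_t frag_len = (uint16_t)(fraginfo >> 48);	/* what the NIC reads as length */

	printf("addr=0x%llx len=%u\n",
	       (unsigned long long)dma_addr, frag_len);
	return 0;
}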
diff --git a/drivers/net/e100.c b/drivers/net/e100.c
index 24253c807e55..f57a85feda3d 100644
--- a/drivers/net/e100.c
+++ b/drivers/net/e100.c
@@ -2154,6 +2154,9 @@ static int e100_loopback_test(struct nic *nic, enum loopback loopback_mode)
 
 	msleep(10);
 
+	pci_dma_sync_single_for_cpu(nic->pdev, nic->rx_to_clean->dma_addr,
+			RFD_BUF_LEN, PCI_DMA_FROMDEVICE);
+
 	if(memcmp(nic->rx_to_clean->skb->data + sizeof(struct rfd),
 	   skb->data, ETH_DATA_LEN))
 		err = -EAGAIN;
@@ -2161,8 +2164,8 @@ static int e100_loopback_test(struct nic *nic, enum loopback loopback_mode)
 err_loopback_none:
 	mdio_write(nic->netdev, nic->mii.phy_id, MII_BMCR, 0);
 	nic->loopback = lb_none;
-	e100_hw_init(nic);
 	e100_clean_cbs(nic);
+	e100_hw_reset(nic);
err_clean_rx:
 	e100_rx_clean_list(nic);
 	return err;
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
index 5b7d0f425af2..84dcca3776ee 100644
--- a/drivers/net/e1000/e1000_main.c
+++ b/drivers/net/e1000/e1000_main.c
@@ -2917,7 +2917,7 @@ e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
 		if (!__pskb_pull_tail(skb, pull_size)) {
 			printk(KERN_ERR "__pskb_pull_tail failed.\n");
 			dev_kfree_skb_any(skb);
-			return -EFAULT;
+			return NETDEV_TX_OK;
 		}
 		len = skb->len - skb->data_len;
 	}
@@ -3710,7 +3710,7 @@ e1000_clean_rx_irq(struct e1000_adapter *adapter,
 		e1000_rx_checksum(adapter,
 				  (uint32_t)(status) |
 				  ((uint32_t)(rx_desc->errors) << 24),
-				  rx_desc->csum, skb);
+				  le16_to_cpu(rx_desc->csum), skb);
 
 		skb->protocol = eth_type_trans(skb, netdev);
 #ifdef CONFIG_E1000_NAPI
@@ -3854,11 +3854,11 @@ e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
 		}
 
 		e1000_rx_checksum(adapter, staterr,
-				  rx_desc->wb.lower.hi_dword.csum_ip.csum, skb);
+				  le16_to_cpu(rx_desc->wb.lower.hi_dword.csum_ip.csum), skb);
 		skb->protocol = eth_type_trans(skb, netdev);
 
 		if (likely(rx_desc->wb.upper.header_status &
-			   E1000_RXDPS_HDRSTAT_HDRSP))
+			   cpu_to_le16(E1000_RXDPS_HDRSTAT_HDRSP)))
 			adapter->rx_hdr_split++;
 #ifdef CONFIG_E1000_NAPI
 		if (unlikely(adapter->vlgrp && (staterr & E1000_RXD_STAT_VP))) {
@@ -3884,7 +3884,7 @@ e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
 #endif
 
 next_desc:
-		rx_desc->wb.middle.status_error &= ~0xFF;
+		rx_desc->wb.middle.status_error &= cpu_to_le32(~0xFF);
 		buffer_info->skb = NULL;
 
 		/* return some buffers to hardware, one at a time is too slow */
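The e1000 hunks wrap the wire-format descriptor fields in le16_to_cpu()/cpu_to_le16()/cpu_to_le32() so that comparisons and read-modify-write of those fields stay correct on big-endian hosts. A hedged userspace sketch of the same idea, using the glibc/BSD endian.h helpers (le16toh, htole16, htole32, le32toh) in place of the kernel macros, with a made-up descriptor struct:

#include <endian.h>
#include <stdint.h>
#include <stdio.h>

/* Descriptor fields are little-endian as the NIC writes them; convert
 * before using them as host integers, and convert host constants before
 * masking or storing them back.
 */
struct rx_desc_sketch {
	uint16_t csum;		/* little-endian in memory */
	uint32_t status_error;	/* little-endian in memory */
};

int main(void)
{
	struct rx_desc_sketch d = { htole16(0xabcd), htole32(0x11223344) };

	uint16_t csum_host = le16toh(d.csum);	/* like le16_to_cpu() */
	d.status_error &= htole32(~0xFFu);	/* like &= cpu_to_le32(~0xFF) */

	printf("csum=0x%04x status=0x%08x\n",
	       csum_host, le32toh(d.status_error));
	return 0;
}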
diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c
index 72c1630977d6..73260364cba3 100644
--- a/drivers/net/sky2.c
+++ b/drivers/net/sky2.c
@@ -74,7 +74,7 @@
 #define TX_RING_SIZE		512
 #define TX_DEF_PENDING		(TX_RING_SIZE - 1)
 #define TX_MIN_PENDING		64
-#define MAX_SKB_TX_LE		(4 + 2*MAX_SKB_FRAGS)
+#define MAX_SKB_TX_LE		(4 + (sizeof(dma_addr_t)/sizeof(u32))*MAX_SKB_FRAGS)
 
 #define STATUS_RING_SIZE	2048	/* 2 ports * (TX + 2*RX) */
 #define STATUS_LE_BYTES		(STATUS_RING_SIZE*sizeof(struct sky2_status_le))
@@ -622,8 +622,8 @@ static void sky2_mac_init(struct sky2_hw *hw, unsigned port)
 
 	/* Configure Rx MAC FIFO */
 	sky2_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_RST_CLR);
-	sky2_write16(hw, SK_REG(port, RX_GMF_CTRL_T),
-		     GMF_RX_CTRL_DEF);
+	sky2_write32(hw, SK_REG(port, RX_GMF_CTRL_T),
+		     GMF_OPER_ON | GMF_RX_F_FL_ON);
 
 	/* Flush Rx MAC FIFO on any flow control or error */
 	sky2_write16(hw, SK_REG(port, RX_GMF_FL_MSK), GMR_FS_ANY_ERR);
@@ -995,6 +995,10 @@ static int sky2_rx_start(struct sky2_port *sky2)
 		sky2_rx_add(sky2, re->mapaddr);
 	}
 
+	/* Truncate oversize frames */
+	sky2_write16(hw, SK_REG(sky2->port, RX_GMF_TR_THR), sky2->rx_bufsize - 8);
+	sky2_write32(hw, SK_REG(sky2->port, RX_GMF_CTRL_T), RX_TRUNC_ON);
+
 	/* Tell chip about available buffers */
 	sky2_write16(hw, Y2_QADDR(rxq, PREF_UNIT_PUT_IDX), sky2->rx_put);
 	sky2->rx_last_put = sky2_read16(hw, Y2_QADDR(rxq, PREF_UNIT_PUT_IDX));
@@ -1145,6 +1149,7 @@ static int sky2_xmit_frame(struct sk_buff *skb, struct net_device *dev)
 	struct sky2_tx_le *le = NULL;
 	struct tx_ring_info *re;
 	unsigned i, len;
+	int avail;
 	dma_addr_t mapping;
 	u32 addr64;
 	u16 mss;
@@ -1287,12 +1292,16 @@ static int sky2_xmit_frame(struct sk_buff *skb, struct net_device *dev)
 	re->idx = sky2->tx_prod;
 	le->ctrl |= EOP;
 
+	avail = tx_avail(sky2);
+	if (mss != 0 || avail < TX_MIN_PENDING) {
+		le->ctrl |= FRC_STAT;
+		if (avail <= MAX_SKB_TX_LE)
+			netif_stop_queue(dev);
+	}
+
 	sky2_put_idx(hw, txqaddr[sky2->port], sky2->tx_prod,
 		     &sky2->tx_last_put, TX_RING_SIZE);
 
-	if (tx_avail(sky2) <= MAX_SKB_TX_LE)
-		netif_stop_queue(dev);
-
 out_unlock:
 	spin_unlock(&sky2->tx_lock);
 
@@ -1707,10 +1716,12 @@ static void sky2_tx_timeout(struct net_device *dev)
 
 
 #define roundup(x, y)   ((((x)+((y)-1))/(y))*(y))
-/* Want receive buffer size to be multiple of 64 bits, and incl room for vlan */
+/* Want receive buffer size to be multiple of 64 bits
+ * and incl room for vlan and truncation
+ */
 static inline unsigned sky2_buf_size(int mtu)
 {
-	return roundup(mtu + ETH_HLEN + 4, 8);
+	return roundup(mtu + ETH_HLEN + VLAN_HLEN, 8) + 8;
 }
 
 static int sky2_change_mtu(struct net_device *dev, int new_mtu)
@@ -1793,7 +1804,7 @@ static struct sk_buff *sky2_receive(struct sky2_port *sky2,
 	if (!(status & GMR_FS_RX_OK))
 		goto resubmit;
 
-	if ((status >> 16) != length || length > sky2->rx_bufsize)
+	if (length > sky2->netdev->mtu + ETH_HLEN)
 		goto oversize;
 
 	if (length < copybreak) {
@@ -3243,8 +3254,7 @@ static int __devinit sky2_probe(struct pci_dev *pdev,
 		}
 	}
 
-	err = request_irq(pdev->irq, sky2_intr, SA_SHIRQ | SA_SAMPLE_RANDOM,
-			  DRV_NAME, hw);
+	err = request_irq(pdev->irq, sky2_intr, SA_SHIRQ, DRV_NAME, hw);
 	if (err) {
 		printk(KERN_ERR PFX "%s: cannot assign irq %d\n",
 		       pci_name(pdev), pdev->irq);
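The new MAX_SKB_TX_LE is derived from sizeof(dma_addr_t): each fragment appears to cost one list element for the low 32 address bits plus a second one only when DMA addresses are 64-bit. A small sketch of that sizing arithmetic; the typedefs and the MAX_SKB_FRAGS value of 18 are assumptions for illustration, not taken from sky2.c:

#include <stdio.h>

/* Assumed per-fragment cost: one list element per 32 bits of DMA address,
 * i.e. sizeof(dma_addr_t)/sizeof(u32), plus a fixed overhead of 4 elements
 * per packet for headers/checksum/TSO entries.
 */
typedef unsigned int u32;
typedef unsigned long long dma_addr_t;	/* 8 bytes on a 64-bit DMA config */

#define MAX_SKB_FRAGS	18	/* typical value with 4K pages (assumption) */
#define MAX_SKB_TX_LE	(4 + (sizeof(dma_addr_t)/sizeof(u32)) * MAX_SKB_FRAGS)

int main(void)
{
	printf("worst-case list elements per skb: %zu\n", (size_t)MAX_SKB_TX_LE);
	return 0;
}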
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index 15545620ab0e..caf4102b54ce 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -9552,12 +9552,36 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
 		}
 	}
 
-	/* Find msi capability. */
+	/* The EPB bridge inside 5714, 5715, and 5780 cannot support
+	 * DMA addresses > 40-bit. This bridge may have other additional
+	 * 57xx devices behind it in some 4-port NIC designs for example.
+	 * Any tg3 device found behind the bridge will also need the 40-bit
+	 * DMA workaround.
+	 */
 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
 		tp->tg3_flags2 |= TG3_FLG2_5780_CLASS;
+		tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
 		tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
 	}
+	else {
+		struct pci_dev *bridge = NULL;
+
+		do {
+			bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
+						PCI_DEVICE_ID_SERVERWORKS_EPB,
+						bridge);
+			if (bridge && bridge->subordinate &&
+			    (bridge->subordinate->number <=
+			     tp->pdev->bus->number) &&
+			    (bridge->subordinate->subordinate >=
+			     tp->pdev->bus->number)) {
+				tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
+				pci_dev_put(bridge);
+				break;
+			}
+		} while (bridge);
+	}
 
 	/* Initialize misc host control in PCI block. */
 	tp->misc_host_ctrl |= (misc_ctrl_reg &
@@ -10303,7 +10327,14 @@ static int __devinit tg3_test_dma(struct tg3 *tp)
 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
 		u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
 
-		if (ccval == 0x6 || ccval == 0x7)
+		/* If the 5704 is behind the EPB bridge, we can
+		 * do the less restrictive ONE_DMA workaround for
+		 * better performance.
+		 */
+		if ((tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) &&
+		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
+			tp->dma_rwctrl |= 0x8000;
+		else if (ccval == 0x6 || ccval == 0x7)
 			tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
 
 		/* Set bit 23 to enable PCIX hw bug fix */
@@ -10543,8 +10574,6 @@ static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
 			strcat(str, "66MHz");
 		else if (clock_ctrl == 6)
 			strcat(str, "100MHz");
-		else if (clock_ctrl == 7)
-			strcat(str, "133MHz");
 	} else {
 		strcpy(str, "PCI:");
 		if (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED)
@@ -10761,19 +10790,20 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
 		goto err_out_iounmap;
 	}
 
-	/* 5714, 5715 and 5780 cannot support DMA addresses > 40-bit.
+	/* The EPB bridge inside 5714, 5715, and 5780 and any
+	 * device behind the EPB cannot support DMA addresses > 40-bit.
 	 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
 	 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
 	 * do DMA address check in tg3_start_xmit().
 	 */
-	if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
+	if (tp->tg3_flags2 & TG3_FLG2_IS_5788)
+		persist_dma_mask = dma_mask = DMA_32BIT_MASK;
+	else if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) {
 		persist_dma_mask = dma_mask = DMA_40BIT_MASK;
 #ifdef CONFIG_HIGHMEM
 		dma_mask = DMA_64BIT_MASK;
 #endif
-	} else if (tp->tg3_flags2 & TG3_FLG2_IS_5788)
-		persist_dma_mask = dma_mask = DMA_32BIT_MASK;
-	else
+	} else
 		persist_dma_mask = dma_mask = DMA_64BIT_MASK;
 
 	/* Configure DMA attributes. */
@@ -10910,8 +10940,10 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
 	       (tp->tg3_flags & TG3_FLAG_SPLIT_MODE) != 0,
 	       (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED) == 0,
 	       (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) != 0);
-	printk(KERN_INFO "%s: dma_rwctrl[%08x]\n",
-	       dev->name, tp->dma_rwctrl);
+	printk(KERN_INFO "%s: dma_rwctrl[%08x] dma_mask[%d-bit]\n",
+	       dev->name, tp->dma_rwctrl,
+	       (pdev->dma_mask == DMA_32BIT_MASK) ? 32 :
+	        (((u64) pdev->dma_mask == DMA_40BIT_MASK) ? 40 : 64));
 
 	return 0;
 
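The new tg3_get_invariants() loop marks a device as needing the 40-bit DMA workaround when its bus number falls inside the EPB bridge's secondary-to-subordinate bus range. A compact sketch of just that bus-range test, with the struct and the bus numbers invented for illustration:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-ins for the PCI topology fields used above: a bridge
 * spans secondary..subordinate bus numbers, and a device sits "behind" it
 * when its own bus number lies inside that range.
 */
struct bridge_sketch {
	int secondary;		/* like bridge->subordinate->number */
	int subordinate;	/* like bridge->subordinate->subordinate */
};

static bool behind_bridge(const struct bridge_sketch *br, int dev_bus)
{
	return dev_bus >= br->secondary && dev_bus <= br->subordinate;
}

int main(void)
{
	struct bridge_sketch epb = { .secondary = 2, .subordinate = 4 };

	printf("bus 3 behind EPB: %d\n", behind_bridge(&epb, 3));	/* 1 */
	printf("bus 7 behind EPB: %d\n", behind_bridge(&epb, 7));	/* 0 */
	return 0;
}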
diff --git a/drivers/net/tg3.h b/drivers/net/tg3.h
index 7f4b7f6ac40d..7e3b613afb29 100644
--- a/drivers/net/tg3.h
+++ b/drivers/net/tg3.h
@@ -2163,6 +2163,7 @@ struct tg3 {
 #define TG3_FLAG_10_100_ONLY		0x01000000
 #define TG3_FLAG_PAUSE_AUTONEG		0x02000000
 #define TG3_FLAG_IN_RESET_TASK		0x04000000
+#define TG3_FLAG_40BIT_DMA_BUG		0x08000000
 #define TG3_FLAG_BROKEN_CHECKSUMS	0x10000000
 #define TG3_FLAG_GOT_SERDES_FLOWCTL	0x20000000
 #define TG3_FLAG_SPLIT_MODE		0x40000000
diff --git a/drivers/net/tulip/de2104x.c b/drivers/net/tulip/de2104x.c
index d7fb3ffe06ac..2d0cfbceee22 100644
--- a/drivers/net/tulip/de2104x.c
+++ b/drivers/net/tulip/de2104x.c
@@ -1362,7 +1362,6 @@ static int de_open (struct net_device *dev)
 {
 	struct de_private *de = dev->priv;
 	int rc;
-	unsigned long flags;
 
 	if (netif_msg_ifup(de))
 		printk(KERN_DEBUG "%s: enabling interface\n", dev->name);
@@ -1376,18 +1375,20 @@ static int de_open (struct net_device *dev)
 		return rc;
 	}
 
-	rc = de_init_hw(de);
-	if (rc) {
-		printk(KERN_ERR "%s: h/w init failure, err=%d\n",
-		       dev->name, rc);
-		goto err_out_free;
-	}
+	dw32(IntrMask, 0);
 
 	rc = request_irq(dev->irq, de_interrupt, SA_SHIRQ, dev->name, dev);
 	if (rc) {
 		printk(KERN_ERR "%s: IRQ %d request failure, err=%d\n",
 		       dev->name, dev->irq, rc);
-		goto err_out_hw;
+		goto err_out_free;
+	}
+
+	rc = de_init_hw(de);
+	if (rc) {
+		printk(KERN_ERR "%s: h/w init failure, err=%d\n",
+		       dev->name, rc);
+		goto err_out_free_irq;
 	}
 
 	netif_start_queue(dev);
@@ -1395,11 +1396,8 @@ static int de_open (struct net_device *dev)
 
 	return 0;
 
-err_out_hw:
-	spin_lock_irqsave(&de->lock, flags);
-	de_stop_hw(de);
-	spin_unlock_irqrestore(&de->lock, flags);
-
+err_out_free_irq:
+	free_irq(dev->irq, dev);
 err_out_free:
 	de_free_rings(de);
 	return rc;
@@ -1455,6 +1453,8 @@ static void de_tx_timeout (struct net_device *dev)
 	synchronize_irq(dev->irq);
 	de_clean_rings(de);
 
+	de_init_rings(de);
+
 	de_init_hw(de);
 
 	netif_wake_queue(dev);
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 50b8c6754b1e..a1ed2d983740 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -249,8 +249,11 @@ static __inline__ ssize_t tun_get_user(struct tun_struct *tun, struct iovec *iv,
 
 	if (align)
 		skb_reserve(skb, align);
-	if (memcpy_fromiovec(skb_put(skb, len), iv, len))
+	if (memcpy_fromiovec(skb_put(skb, len), iv, len)) {
+		tun->stats.rx_dropped++;
+		kfree_skb(skb);
 		return -EFAULT;
+	}
 
 	skb->dev = tun->dev;
 	switch (tun->flags & TUN_TYPE_MASK) {
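The tun fix makes the error path own its resources: when the copy from the iovec fails, the freshly allocated skb is freed and rx_dropped is bumped before -EFAULT goes back to the caller, instead of leaking the buffer. A generic userspace sketch of that free-on-error pattern, with receive_sketch() as a hypothetical stand-in for tun_get_user():

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* If filling a freshly allocated buffer fails, the function still owns it
 * and must free it (and account the drop) before returning the error.
 */
static int receive_sketch(const char *src, size_t len, unsigned long *dropped)
{
	char *buf = malloc(len);
	if (!buf)
		return -ENOMEM;

	if (src == NULL) {		/* stands in for memcpy_fromiovec() failing */
		(*dropped)++;
		free(buf);		/* don't leak the buffer on the error path */
		return -EFAULT;
	}

	memcpy(buf, src, len);
	/* ... hand buf off to the rest of the stack ... */
	free(buf);
	return 0;
}

int main(void)
{
	unsigned long dropped = 0;
	printf("%d %lu\n", receive_sketch(NULL, 64, &dropped), dropped);
	return 0;
}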