author     Jeff Garzik <jeff@garzik.org>  2006-03-11 13:35:31 -0500
committer  Jeff Garzik <jeff@garzik.org>  2006-03-11 13:35:31 -0500
commit     749dfc70554f2c9e6624ac843d66571265ed9338 (patch)
tree       bf591255b3f158222f90852d53c4279e6e7e9ced /drivers/net
parent     74f5ec29ae93aa42c49f4285c20c457afe937881 (diff)
parent     0992a5d029181421877a716eaf99145828ff7eae (diff)
Merge branch 'upstream-fixes'
Diffstat (limited to 'drivers/net')
-rw-r--r--  drivers/net/3c509.c             | 13
-rw-r--r--  drivers/net/Kconfig             |  3
-rw-r--r--  drivers/net/de620.c             |  2
-rw-r--r--  drivers/net/dl2k.c              | 25
-rw-r--r--  drivers/net/e1000/e1000_main.c  |  2
-rw-r--r--  drivers/net/sky2.c              | 32
-rw-r--r--  drivers/net/tg3.c               | 87
-rw-r--r--  drivers/net/tulip/de2104x.c     | 26
8 files changed, 120 insertions(+), 70 deletions(-)
diff --git a/drivers/net/3c509.c b/drivers/net/3c509.c
index 824e430486c2..830528dce0ca 100644
--- a/drivers/net/3c509.c
+++ b/drivers/net/3c509.c
@@ -1574,6 +1574,7 @@ MODULE_LICENSE("GPL");
 
 static int __init el3_init_module(void)
 {
+	int ret = 0;
 	el3_cards = 0;
 
 	if (debug >= 0)
@@ -1589,14 +1590,16 @@ static int __init el3_init_module(void)
 	}
 
 #ifdef CONFIG_EISA
-	if (eisa_driver_register (&el3_eisa_driver) < 0) {
-		eisa_driver_unregister (&el3_eisa_driver);
-	}
+	ret = eisa_driver_register(&el3_eisa_driver);
 #endif
 #ifdef CONFIG_MCA
-	mca_register_driver(&el3_mca_driver);
+	{
+		int err = mca_register_driver(&el3_mca_driver);
+		if (ret == 0)
+			ret = err;
+	}
 #endif
-	return 0;
+	return ret;
 }
 
 static void __exit el3_cleanup_module(void)
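
The el3_init_module() change above stops discarding the bus registration results: the EISA status seeds ret, and the MCA status only replaces it while ret is still zero, so module init reports the first failure instead of always returning 0. A standalone sketch of that first-error-wins pattern (register_eisa()/register_mca() are made-up stand-ins, not kernel APIs):

/* first-error-wins aggregation, as in the new el3_init_module() */
#include <stdio.h>

static int register_eisa(void) { return 0; }	/* pretend EISA registration worked */
static int register_mca(void)  { return -19; }	/* pretend MCA registration failed (-ENODEV) */

static int el3_init_like(void)
{
	int ret = 0;

	ret = register_eisa();
	{
		int err = register_mca();
		if (ret == 0)		/* keep the first error, if any */
			ret = err;
	}
	return ret;
}

int main(void)
{
	printf("init returned %d\n", el3_init_like());
	return 0;
}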
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index bdf294d2df26..d4d8e5f9ebf1 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -1087,7 +1087,8 @@ config NE2000
 	  without a specific driver are compatible with NE2000.
 
 	  If you have a PCI NE2000 card however, say N here and Y to "PCI
-	  NE2000 support", above. If you have a NE2000 card and are running on
+	  NE2000 and clone support" under "EISA, VLB, PCI and on board
+	  controllers" below. If you have a NE2000 card and are running on
 	  an MCA system (a bus system used on some IBM PS/2 computers and
 	  laptops), say N here and Y to "NE/2 (ne2000 MCA version) support",
 	  below.
diff --git a/drivers/net/de620.c b/drivers/net/de620.c
index 0069f5fa973a..22fc5b869a60 100644
--- a/drivers/net/de620.c
+++ b/drivers/net/de620.c
@@ -1012,7 +1012,7 @@ static int __init read_eeprom(struct net_device *dev)
 #ifdef MODULE
 static struct net_device *de620_dev;
 
-int init_module(void)
+int __init init_module(void)
 {
 	de620_dev = de620_probe(-1);
 	if (IS_ERR(de620_dev))
diff --git a/drivers/net/dl2k.c b/drivers/net/dl2k.c
index 6376b63d9b17..1f3627470c95 100644
--- a/drivers/net/dl2k.c
+++ b/drivers/net/dl2k.c
@@ -50,8 +50,8 @@
 
 */
 #define DRV_NAME	"D-Link DL2000-based linux driver"
-#define DRV_VERSION	"v1.17a"
-#define DRV_RELDATE	"2002/10/04"
+#define DRV_VERSION	"v1.17b"
+#define DRV_RELDATE	"2006/03/10"
 #include "dl2k.h"
 
 static char version[] __devinitdata =
@@ -765,7 +765,7 @@ rio_free_tx (struct net_device *dev, int irq)
 			break;
 		skb = np->tx_skbuff[entry];
 		pci_unmap_single (np->pdev,
-				  np->tx_ring[entry].fraginfo,
+				  np->tx_ring[entry].fraginfo & 0xffffffffffff,
 				  skb->len, PCI_DMA_TODEVICE);
 		if (irq)
 			dev_kfree_skb_irq (skb);
@@ -892,14 +892,16 @@ receive_packet (struct net_device *dev)
 
 		/* Small skbuffs for short packets */
 		if (pkt_len > copy_thresh) {
-			pci_unmap_single (np->pdev, desc->fraginfo,
+			pci_unmap_single (np->pdev,
+					  desc->fraginfo & 0xffffffffffff,
 					  np->rx_buf_sz,
 					  PCI_DMA_FROMDEVICE);
 			skb_put (skb = np->rx_skbuff[entry], pkt_len);
 			np->rx_skbuff[entry] = NULL;
 		} else if ((skb = dev_alloc_skb (pkt_len + 2)) != NULL) {
 			pci_dma_sync_single_for_cpu(np->pdev,
-						    desc->fraginfo,
+						    desc->fraginfo &
+							0xffffffffffff,
 						    np->rx_buf_sz,
 						    PCI_DMA_FROMDEVICE);
 			skb->dev = dev;
@@ -910,7 +912,8 @@ receive_packet (struct net_device *dev)
 						  pkt_len, 0);
 			skb_put (skb, pkt_len);
 			pci_dma_sync_single_for_device(np->pdev,
-						       desc->fraginfo,
+						       desc->fraginfo &
+							   0xffffffffffff,
 						       np->rx_buf_sz,
 						       PCI_DMA_FROMDEVICE);
 		}
@@ -1796,8 +1799,9 @@ rio_close (struct net_device *dev)
 		np->rx_ring[i].fraginfo = 0;
 		skb = np->rx_skbuff[i];
 		if (skb) {
-			pci_unmap_single (np->pdev, np->rx_ring[i].fraginfo,
-					  skb->len, PCI_DMA_FROMDEVICE);
+			pci_unmap_single(np->pdev,
+					 np->rx_ring[i].fraginfo & 0xffffffffffff,
+					 skb->len, PCI_DMA_FROMDEVICE);
 			dev_kfree_skb (skb);
 			np->rx_skbuff[i] = NULL;
 		}
@@ -1805,8 +1809,9 @@ rio_close (struct net_device *dev)
 	for (i = 0; i < TX_RING_SIZE; i++) {
 		skb = np->tx_skbuff[i];
 		if (skb) {
-			pci_unmap_single (np->pdev, np->tx_ring[i].fraginfo,
-					  skb->len, PCI_DMA_TODEVICE);
+			pci_unmap_single(np->pdev,
+					 np->tx_ring[i].fraginfo & 0xffffffffffff,
+					 skb->len, PCI_DMA_TODEVICE);
 			dev_kfree_skb (skb);
 			np->tx_skbuff[i] = NULL;
 		}
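
The dl2k hunks above mask fraginfo with 0xffffffffffff before every unmap/sync call. A minimal userspace sketch of why, assuming (as the driver's descriptor handling suggests) that the 64-bit fraginfo word carries the bus address in its low 48 bits and the fragment length in the upper 16 bits, so the length must be stripped before the value is handed back to the DMA API:

/* standalone illustration of the 48-bit fraginfo mask; not driver code */
#include <stdint.h>
#include <stdio.h>
#include <inttypes.h>

#define DL2K_FRAG_ADDR_MASK 0xffffffffffffULL	/* low 48 bits */

static uint64_t pack_fraginfo(uint64_t dma_addr, uint16_t len)
{
	/* address in bits 0..47, length in bits 48..63 */
	return (dma_addr & DL2K_FRAG_ADDR_MASK) | ((uint64_t)len << 48);
}

int main(void)
{
	uint64_t dma_addr = 0x0000123456789abcULL;	/* hypothetical bus address */
	uint16_t len = 1514;				/* hypothetical frame length */
	uint64_t fraginfo = pack_fraginfo(dma_addr, len);

	/* without the mask, the unmap would be handed a bogus address */
	printf("fraginfo           = 0x%016" PRIx64 "\n", fraginfo);
	printf("address (masked)   = 0x%016" PRIx64 "\n", fraginfo & DL2K_FRAG_ADDR_MASK);
	printf("length  (bits 48+) = %" PRIu64 "\n", fraginfo >> 48);
	return 0;
}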
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
index 9adaf5fa9d48..0c631a77ccf6 100644
--- a/drivers/net/e1000/e1000_main.c
+++ b/drivers/net/e1000/e1000_main.c
@@ -2815,7 +2815,7 @@ e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
 				printk(KERN_ERR
 				       "__pskb_pull_tail failed.\n");
 				dev_kfree_skb_any(skb);
-				return -EFAULT;
+				return NETDEV_TX_OK;
 			}
 			len = skb->len - skb->data_len;
 			break;
diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c
index 72c1630977d6..73260364cba3 100644
--- a/drivers/net/sky2.c
+++ b/drivers/net/sky2.c
@@ -74,7 +74,7 @@
 #define TX_RING_SIZE		512
 #define TX_DEF_PENDING		(TX_RING_SIZE - 1)
 #define TX_MIN_PENDING		64
-#define MAX_SKB_TX_LE		(4 + 2*MAX_SKB_FRAGS)
+#define MAX_SKB_TX_LE		(4 + (sizeof(dma_addr_t)/sizeof(u32))*MAX_SKB_FRAGS)
 
 #define STATUS_RING_SIZE	2048	/* 2 ports * (TX + 2*RX) */
 #define STATUS_LE_BYTES		(STATUS_RING_SIZE*sizeof(struct sky2_status_le))
@@ -622,8 +622,8 @@ static void sky2_mac_init(struct sky2_hw *hw, unsigned port)
 
 	/* Configure Rx MAC FIFO */
 	sky2_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_RST_CLR);
-	sky2_write16(hw, SK_REG(port, RX_GMF_CTRL_T),
-		     GMF_RX_CTRL_DEF);
+	sky2_write32(hw, SK_REG(port, RX_GMF_CTRL_T),
+		     GMF_OPER_ON | GMF_RX_F_FL_ON);
 
 	/* Flush Rx MAC FIFO on any flow control or error */
 	sky2_write16(hw, SK_REG(port, RX_GMF_FL_MSK), GMR_FS_ANY_ERR);
@@ -995,6 +995,10 @@ static int sky2_rx_start(struct sky2_port *sky2)
 		sky2_rx_add(sky2, re->mapaddr);
 	}
 
+	/* Truncate oversize frames */
+	sky2_write16(hw, SK_REG(sky2->port, RX_GMF_TR_THR), sky2->rx_bufsize - 8);
+	sky2_write32(hw, SK_REG(sky2->port, RX_GMF_CTRL_T), RX_TRUNC_ON);
+
 	/* Tell chip about available buffers */
 	sky2_write16(hw, Y2_QADDR(rxq, PREF_UNIT_PUT_IDX), sky2->rx_put);
 	sky2->rx_last_put = sky2_read16(hw, Y2_QADDR(rxq, PREF_UNIT_PUT_IDX));
@@ -1145,6 +1149,7 @@ static int sky2_xmit_frame(struct sk_buff *skb, struct net_device *dev)
 	struct sky2_tx_le *le = NULL;
 	struct tx_ring_info *re;
 	unsigned i, len;
+	int avail;
 	dma_addr_t mapping;
 	u32 addr64;
 	u16 mss;
@@ -1287,12 +1292,16 @@ static int sky2_xmit_frame(struct sk_buff *skb, struct net_device *dev)
 	re->idx = sky2->tx_prod;
 	le->ctrl |= EOP;
 
+	avail = tx_avail(sky2);
+	if (mss != 0 || avail < TX_MIN_PENDING) {
+		le->ctrl |= FRC_STAT;
+		if (avail <= MAX_SKB_TX_LE)
+			netif_stop_queue(dev);
+	}
+
 	sky2_put_idx(hw, txqaddr[sky2->port], sky2->tx_prod,
 		     &sky2->tx_last_put, TX_RING_SIZE);
 
-	if (tx_avail(sky2) <= MAX_SKB_TX_LE)
-		netif_stop_queue(dev);
-
 out_unlock:
 	spin_unlock(&sky2->tx_lock);
 
@@ -1707,10 +1716,12 @@ static void sky2_tx_timeout(struct net_device *dev)
 
 
 #define roundup(x, y)   ((((x)+((y)-1))/(y))*(y))
-/* Want receive buffer size to be multiple of 64 bits, and incl room for vlan */
+/* Want receive buffer size to be multiple of 64 bits
+ * and incl room for vlan and truncation
+ */
 static inline unsigned sky2_buf_size(int mtu)
 {
-	return roundup(mtu + ETH_HLEN + 4, 8);
+	return roundup(mtu + ETH_HLEN + VLAN_HLEN, 8) + 8;
 }
 
 static int sky2_change_mtu(struct net_device *dev, int new_mtu)
1716static int sky2_change_mtu(struct net_device *dev, int new_mtu) 1727static int sky2_change_mtu(struct net_device *dev, int new_mtu)
@@ -1793,7 +1804,7 @@ static struct sk_buff *sky2_receive(struct sky2_port *sky2,
 	if (!(status & GMR_FS_RX_OK))
 		goto resubmit;
 
-	if ((status >> 16) != length || length > sky2->rx_bufsize)
+	if (length > sky2->netdev->mtu + ETH_HLEN)
 		goto oversize;
 
 	if (length < copybreak) {
@@ -3243,8 +3254,7 @@ static int __devinit sky2_probe(struct pci_dev *pdev,
 		}
 	}
 
-	err = request_irq(pdev->irq, sky2_intr, SA_SHIRQ | SA_SAMPLE_RANDOM,
-			  DRV_NAME, hw);
+	err = request_irq(pdev->irq, sky2_intr, SA_SHIRQ, DRV_NAME, hw);
 	if (err) {
 		printk(KERN_ERR PFX "%s: cannot assign irq %d\n",
 		       pci_name(pdev), pdev->irq);
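
The sky2 hunks above grow the receive buffer to roundup(mtu + ETH_HLEN + VLAN_HLEN, 8) + 8 and program the Rx FIFO truncation threshold to rx_bufsize - 8, so oversize frames are cut by hardware instead of overrunning the buffer. A standalone sketch of just that arithmetic (ETH_HLEN and VLAN_HLEN use their usual 14- and 4-byte values; the MTUs are only examples):

/* standalone illustration of the new sky2_buf_size() sizing; not driver code */
#include <stdio.h>

#define ETH_HLEN  14	/* Ethernet header */
#define VLAN_HLEN 4	/* 802.1Q tag */
#define roundup(x, y) ((((x) + ((y) - 1)) / (y)) * (y))

static unsigned sky2_buf_size(int mtu)
{
	/* multiple of 8 bytes, plus 8 bytes of slack for truncation */
	return roundup(mtu + ETH_HLEN + VLAN_HLEN, 8) + 8;
}

int main(void)
{
	int mtus[] = { 1500, 4096, 9000 };
	for (unsigned i = 0; i < sizeof(mtus) / sizeof(mtus[0]); i++) {
		unsigned size = sky2_buf_size(mtus[i]);
		printf("mtu %4d -> rx_bufsize %5u, truncation threshold %5u\n",
		       mtus[i], size, size - 8);
	}
	return 0;
}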
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index 83ff5994a8d5..0f3798f81883 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -3532,9 +3532,23 @@ static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
 		(base + len + 8 < base));
 }
 
+/* Test for DMA addresses > 40-bit */
+static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
+					  int len)
+{
+#if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
+	if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
+		return (((u64) mapping + len) > DMA_40BIT_MASK);
+	return 0;
+#else
+	return 0;
+#endif
+}
+
 static void tg3_set_txd(struct tg3 *, int, dma_addr_t, int, u32, u32);
 
-static int tigon3_4gb_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb,
+/* Workaround 4GB and 40-bit hardware DMA bugs. */
+static int tigon3_dma_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb,
 				       u32 last_plus_one, u32 *start,
 				       u32 base_flags, u32 mss)
 {
@@ -3742,6 +3756,9 @@ static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		if (tg3_4g_overflow_test(mapping, len))
 			would_hit_hwbug = 1;
 
+		if (tg3_40bit_overflow_test(tp, mapping, len))
+			would_hit_hwbug = 1;
+
 		if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
 			tg3_set_txd(tp, entry, mapping, len,
 				    base_flags, (i == last)|(mss << 1));
@@ -3763,7 +3780,7 @@ static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		/* If the workaround fails due to memory/mapping
 		 * failure, silently drop this packet.
 		 */
-		if (tigon3_4gb_hwbug_workaround(tp, skb, last_plus_one,
+		if (tigon3_dma_hwbug_workaround(tp, skb, last_plus_one,
 						&start, base_flags, mss))
 			goto out_unlock;
 
@@ -10608,8 +10625,9 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
 	unsigned long tg3reg_base, tg3reg_len;
 	struct net_device *dev;
 	struct tg3 *tp;
-	int i, err, pci_using_dac, pm_cap;
+	int i, err, pm_cap;
 	char str[40];
+	u64 dma_mask, persist_dma_mask;
 
 	if (tg3_version_printed++ == 0)
 		printk(KERN_INFO "%s", version);
@@ -10646,26 +10664,6 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
 		goto err_out_free_res;
 	}
 
-	/* Configure DMA attributes. */
-	err = pci_set_dma_mask(pdev, DMA_64BIT_MASK);
-	if (!err) {
-		pci_using_dac = 1;
-		err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
-		if (err < 0) {
-			printk(KERN_ERR PFX "Unable to obtain 64 bit DMA "
-			       "for consistent allocations\n");
-			goto err_out_free_res;
-		}
-	} else {
-		err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
-		if (err) {
-			printk(KERN_ERR PFX "No usable DMA configuration, "
-			       "aborting.\n");
-			goto err_out_free_res;
-		}
-		pci_using_dac = 0;
-	}
-
 	tg3reg_base = pci_resource_start(pdev, 0);
 	tg3reg_len = pci_resource_len(pdev, 0);
 
@@ -10679,8 +10677,6 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
 	SET_MODULE_OWNER(dev);
 	SET_NETDEV_DEV(dev, &pdev->dev);
 
-	if (pci_using_dac)
-		dev->features |= NETIF_F_HIGHDMA;
 	dev->features |= NETIF_F_LLTX;
 #if TG3_VLAN_TAG_USED
 	dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
@@ -10765,6 +10761,44 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
 		goto err_out_iounmap;
 	}
 
+	/* 5714, 5715 and 5780 cannot support DMA addresses > 40-bit.
+	 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
+	 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
+	 * do DMA address check in tg3_start_xmit().
+	 */
+	if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
+		persist_dma_mask = dma_mask = DMA_40BIT_MASK;
+#ifdef CONFIG_HIGHMEM
+		dma_mask = DMA_64BIT_MASK;
+#endif
+	} else if (tp->tg3_flags2 & TG3_FLG2_IS_5788)
+		persist_dma_mask = dma_mask = DMA_32BIT_MASK;
+	else
+		persist_dma_mask = dma_mask = DMA_64BIT_MASK;
+
+	/* Configure DMA attributes. */
+	if (dma_mask > DMA_32BIT_MASK) {
+		err = pci_set_dma_mask(pdev, dma_mask);
+		if (!err) {
+			dev->features |= NETIF_F_HIGHDMA;
+			err = pci_set_consistent_dma_mask(pdev,
+							  persist_dma_mask);
+			if (err < 0) {
+				printk(KERN_ERR PFX "Unable to obtain 64 bit "
+				       "DMA for consistent allocations\n");
+				goto err_out_iounmap;
+			}
+		}
+	}
+	if (err || dma_mask == DMA_32BIT_MASK) {
+		err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
+		if (err) {
+			printk(KERN_ERR PFX "No usable DMA configuration, "
+			       "aborting.\n");
+			goto err_out_iounmap;
+		}
+	}
+
 	tg3_init_bufmgr_config(tp);
 
 #if TG3_TSO_SUPPORT != 0
@@ -10833,9 +10867,6 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
 	} else
 		tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
 
-	if (tp->tg3_flags2 & TG3_FLG2_IS_5788)
-		dev->features &= ~NETIF_F_HIGHDMA;
-
 	/* flow control autonegotiation is default behavior */
 	tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
 
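
The tg3 hunks above add tg3_40bit_overflow_test() and route any frame whose mapping would end past DMA_40BIT_MASK through the existing bounce workaround, since 5714/5715/5780 parts cannot DMA above 2^40. A simplified standalone sketch of just the overflow arithmetic (the chip-class and CONFIG_HIGHMEM checks of the real helper are omitted, and the addresses are made up):

/* standalone illustration of the 40-bit DMA boundary check; not driver code */
#include <stdint.h>
#include <stdio.h>

#define DMA_40BIT_MASK 0x000000ffffffffffULL	/* (1ULL << 40) - 1 */

static int overflows_40bit(uint64_t mapping, int len)
{
	/* flag mappings whose end lies above the 40-bit limit */
	return (mapping + len) > DMA_40BIT_MASK;
}

int main(void)
{
	struct { uint64_t mapping; int len; } cases[] = {
		{ 0x0000000080000000ULL, 1500 },	/* well below 40 bits */
		{ 0x000000ffffffff00ULL, 1500 },	/* crosses the 40-bit line */
		{ 0x0000010000000000ULL,   64 },	/* already above 40 bits */
	};
	for (unsigned i = 0; i < sizeof(cases) / sizeof(cases[0]); i++)
		printf("mapping 0x%012llx len %4d -> would_hit_hwbug = %d\n",
		       (unsigned long long)cases[i].mapping, cases[i].len,
		       overflows_40bit(cases[i].mapping, cases[i].len));
	return 0;
}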
diff --git a/drivers/net/tulip/de2104x.c b/drivers/net/tulip/de2104x.c
index d6c3d52d2e86..6299e186c73f 100644
--- a/drivers/net/tulip/de2104x.c
+++ b/drivers/net/tulip/de2104x.c
@@ -1357,7 +1357,6 @@ static int de_open (struct net_device *dev)
 {
 	struct de_private *de = dev->priv;
 	int rc;
-	unsigned long flags;
 
 	if (netif_msg_ifup(de))
 		printk(KERN_DEBUG "%s: enabling interface\n", dev->name);
@@ -1371,18 +1370,20 @@ static int de_open (struct net_device *dev)
 		return rc;
 	}
 
-	rc = de_init_hw(de);
-	if (rc) {
-		printk(KERN_ERR "%s: h/w init failure, err=%d\n",
-		       dev->name, rc);
-		goto err_out_free;
-	}
+	dw32(IntrMask, 0);
 
 	rc = request_irq(dev->irq, de_interrupt, SA_SHIRQ, dev->name, dev);
 	if (rc) {
 		printk(KERN_ERR "%s: IRQ %d request failure, err=%d\n",
 			dev->name, dev->irq, rc);
-		goto err_out_hw;
+		goto err_out_free;
+	}
+
+	rc = de_init_hw(de);
+	if (rc) {
+		printk(KERN_ERR "%s: h/w init failure, err=%d\n",
+		       dev->name, rc);
+		goto err_out_free_irq;
 	}
 
 	netif_start_queue(dev);
@@ -1390,11 +1391,8 @@ static int de_open (struct net_device *dev)
 
 	return 0;
 
-err_out_hw:
-	spin_lock_irqsave(&de->lock, flags);
-	de_stop_hw(de);
-	spin_unlock_irqrestore(&de->lock, flags);
-
+err_out_free_irq:
+	free_irq(dev->irq, dev);
 err_out_free:
 	de_free_rings(de);
 	return rc;
@@ -1450,6 +1448,8 @@ static void de_tx_timeout (struct net_device *dev)
 	synchronize_irq(dev->irq);
 	de_clean_rings(de);
 
+	de_init_rings(de);
+
 	de_init_hw(de);
 
 	netif_wake_queue(dev);