Diffstat (limited to 'drivers/net')
-rw-r--r--  drivers/net/8139cp.c                     |  37
-rw-r--r--  drivers/net/e1000/e1000.h                |   3
-rw-r--r--  drivers/net/e1000/e1000_main.c           | 119
-rw-r--r--  drivers/net/ifb.c                        |   2
-rw-r--r--  drivers/net/pcmcia/axnet_cs.c            |   1
-rw-r--r--  drivers/net/r8169.c                      | 189
-rw-r--r--  drivers/net/sis900.c                     |   4
-rw-r--r--  drivers/net/skge.c                       |  75
-rw-r--r--  drivers/net/skge.h                       |   1
-rw-r--r--  drivers/net/sky2.c                       | 250
-rw-r--r--  drivers/net/sky2.h                       |  84
-rw-r--r--  drivers/net/tg3.c                        |   9
-rw-r--r--  drivers/net/tlan.c                       |   2
-rw-r--r--  drivers/net/via-velocity.c               |   3
-rw-r--r--  drivers/net/wireless/hostap/hostap_cs.c  |   5
15 files changed, 457 insertions(+), 327 deletions(-)
diff --git a/drivers/net/8139cp.c b/drivers/net/8139cp.c
index f822cd3025ff..dd410496aadb 100644
--- a/drivers/net/8139cp.c
+++ b/drivers/net/8139cp.c
@@ -1118,13 +1118,18 @@ err_out:
1118 return -ENOMEM; 1118 return -ENOMEM;
1119} 1119}
1120 1120
1121static void cp_init_rings_index (struct cp_private *cp)
1122{
1123 cp->rx_tail = 0;
1124 cp->tx_head = cp->tx_tail = 0;
1125}
1126
1121static int cp_init_rings (struct cp_private *cp) 1127static int cp_init_rings (struct cp_private *cp)
1122{ 1128{
1123 memset(cp->tx_ring, 0, sizeof(struct cp_desc) * CP_TX_RING_SIZE); 1129 memset(cp->tx_ring, 0, sizeof(struct cp_desc) * CP_TX_RING_SIZE);
1124 cp->tx_ring[CP_TX_RING_SIZE - 1].opts1 = cpu_to_le32(RingEnd); 1130 cp->tx_ring[CP_TX_RING_SIZE - 1].opts1 = cpu_to_le32(RingEnd);
1125 1131
1126 cp->rx_tail = 0; 1132 cp_init_rings_index(cp);
1127 cp->tx_head = cp->tx_tail = 0;
1128 1133
1129 return cp_refill_rx (cp); 1134 return cp_refill_rx (cp);
1130} 1135}
@@ -1886,30 +1891,30 @@ static int cp_suspend (struct pci_dev *pdev, pm_message_t state)
1886 1891
1887 spin_unlock_irqrestore (&cp->lock, flags); 1892 spin_unlock_irqrestore (&cp->lock, flags);
1888 1893
1889 if (cp->pdev && cp->wol_enabled) { 1894 pci_save_state(pdev);
1890 pci_save_state (cp->pdev); 1895 pci_enable_wake(pdev, pci_choose_state(pdev, state), cp->wol_enabled);
1891 cp_set_d3_state (cp); 1896 pci_set_power_state(pdev, pci_choose_state(pdev, state));
1892 }
1893 1897
1894 return 0; 1898 return 0;
1895} 1899}
1896 1900
1897static int cp_resume (struct pci_dev *pdev) 1901static int cp_resume (struct pci_dev *pdev)
1898{ 1902{
1899 struct net_device *dev; 1903 struct net_device *dev = pci_get_drvdata (pdev);
1900 struct cp_private *cp; 1904 struct cp_private *cp = netdev_priv(dev);
1901 unsigned long flags; 1905 unsigned long flags;
1902 1906
1903 dev = pci_get_drvdata (pdev); 1907 if (!netif_running(dev))
1904 cp = netdev_priv(dev); 1908 return 0;
1905 1909
1906 netif_device_attach (dev); 1910 netif_device_attach (dev);
1907 1911
1908 if (cp->pdev && cp->wol_enabled) { 1912 pci_set_power_state(pdev, PCI_D0);
1909 pci_set_power_state (cp->pdev, PCI_D0); 1913 pci_restore_state(pdev);
1910 pci_restore_state (cp->pdev); 1914 pci_enable_wake(pdev, PCI_D0, 0);
1911 } 1915
1912 1916 /* FIXME: sh*t may happen if the Rx ring buffer is depleted */
1917 cp_init_rings_index (cp);
1913 cp_init_hw (cp); 1918 cp_init_hw (cp);
1914 netif_start_queue (dev); 1919 netif_start_queue (dev);
1915 1920
diff --git a/drivers/net/e1000/e1000.h b/drivers/net/e1000/e1000.h
index 27c77306193b..99baf0e099fc 100644
--- a/drivers/net/e1000/e1000.h
+++ b/drivers/net/e1000/e1000.h
@@ -225,9 +225,6 @@ struct e1000_rx_ring {
225 struct e1000_ps_page *ps_page; 225 struct e1000_ps_page *ps_page;
226 struct e1000_ps_page_dma *ps_page_dma; 226 struct e1000_ps_page_dma *ps_page_dma;
227 227
228 struct sk_buff *rx_skb_top;
229 struct sk_buff *rx_skb_prev;
230
231 /* cpu for rx queue */ 228 /* cpu for rx queue */
232 int cpu; 229 int cpu;
233 230
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
index 31e332935e5a..5b7d0f425af2 100644
--- a/drivers/net/e1000/e1000_main.c
+++ b/drivers/net/e1000/e1000_main.c
@@ -103,7 +103,7 @@ static char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver";
103#else 103#else
104#define DRIVERNAPI "-NAPI" 104#define DRIVERNAPI "-NAPI"
105#endif 105#endif
106#define DRV_VERSION "6.3.9-k2"DRIVERNAPI 106#define DRV_VERSION "6.3.9-k4"DRIVERNAPI
107char e1000_driver_version[] = DRV_VERSION; 107char e1000_driver_version[] = DRV_VERSION;
108static char e1000_copyright[] = "Copyright (c) 1999-2005 Intel Corporation."; 108static char e1000_copyright[] = "Copyright (c) 1999-2005 Intel Corporation.";
109 109
@@ -1635,8 +1635,6 @@ setup_rx_desc_die:
1635 1635
1636 rxdr->next_to_clean = 0; 1636 rxdr->next_to_clean = 0;
1637 rxdr->next_to_use = 0; 1637 rxdr->next_to_use = 0;
1638 rxdr->rx_skb_top = NULL;
1639 rxdr->rx_skb_prev = NULL;
1640 1638
1641 return 0; 1639 return 0;
1642} 1640}
@@ -1713,8 +1711,23 @@ e1000_setup_rctl(struct e1000_adapter *adapter)
1713 rctl |= adapter->rx_buffer_len << 0x11; 1711 rctl |= adapter->rx_buffer_len << 0x11;
1714 } else { 1712 } else {
1715 rctl &= ~E1000_RCTL_SZ_4096; 1713 rctl &= ~E1000_RCTL_SZ_4096;
1716 rctl &= ~E1000_RCTL_BSEX; 1714 rctl |= E1000_RCTL_BSEX;
1717 rctl |= E1000_RCTL_SZ_2048; 1715 switch (adapter->rx_buffer_len) {
1716 case E1000_RXBUFFER_2048:
1717 default:
1718 rctl |= E1000_RCTL_SZ_2048;
1719 rctl &= ~E1000_RCTL_BSEX;
1720 break;
1721 case E1000_RXBUFFER_4096:
1722 rctl |= E1000_RCTL_SZ_4096;
1723 break;
1724 case E1000_RXBUFFER_8192:
1725 rctl |= E1000_RCTL_SZ_8192;
1726 break;
1727 case E1000_RXBUFFER_16384:
1728 rctl |= E1000_RCTL_SZ_16384;
1729 break;
1730 }
1718 } 1731 }
1719 1732
1720#ifndef CONFIG_E1000_DISABLE_PACKET_SPLIT 1733#ifndef CONFIG_E1000_DISABLE_PACKET_SPLIT
@@ -2107,16 +2120,6 @@ e1000_clean_rx_ring(struct e1000_adapter *adapter,
2107 } 2120 }
2108 } 2121 }
2109 2122
2110 /* there also may be some cached data in our adapter */
2111 if (rx_ring->rx_skb_top) {
2112 dev_kfree_skb(rx_ring->rx_skb_top);
2113
2114 /* rx_skb_prev will be wiped out by rx_skb_top */
2115 rx_ring->rx_skb_top = NULL;
2116 rx_ring->rx_skb_prev = NULL;
2117 }
2118
2119
2120 size = sizeof(struct e1000_buffer) * rx_ring->count; 2123 size = sizeof(struct e1000_buffer) * rx_ring->count;
2121 memset(rx_ring->buffer_info, 0, size); 2124 memset(rx_ring->buffer_info, 0, size);
2122 size = sizeof(struct e1000_ps_page) * rx_ring->count; 2125 size = sizeof(struct e1000_ps_page) * rx_ring->count;
@@ -3106,24 +3109,27 @@ e1000_change_mtu(struct net_device *netdev, int new_mtu)
3106 break; 3109 break;
3107 } 3110 }
3108 3111
3109 /* since the driver code now supports splitting a packet across 3112
3110 * multiple descriptors, most of the fifo related limitations on
3111 * jumbo frame traffic have gone away.
3112 * simply use 2k descriptors for everything.
3113 *
3114 * NOTE: dev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN
3115 * means we reserve 2 more, this pushes us to allocate from the next
3116 * larger slab size
3117 * i.e. RXBUFFER_2048 --> size-4096 slab */
3118
3119 /* recent hardware supports 1KB granularity */
3120 if (adapter->hw.mac_type > e1000_82547_rev_2) { 3113 if (adapter->hw.mac_type > e1000_82547_rev_2) {
3121 adapter->rx_buffer_len = 3114 adapter->rx_buffer_len = max_frame;
3122 ((max_frame < E1000_RXBUFFER_2048) ?
3123 max_frame : E1000_RXBUFFER_2048);
3124 E1000_ROUNDUP(adapter->rx_buffer_len, 1024); 3115 E1000_ROUNDUP(adapter->rx_buffer_len, 1024);
3125 } else 3116 } else {
3126 adapter->rx_buffer_len = E1000_RXBUFFER_2048; 3117 if(unlikely((adapter->hw.mac_type < e1000_82543) &&
3118 (max_frame > MAXIMUM_ETHERNET_FRAME_SIZE))) {
3119 DPRINTK(PROBE, ERR, "Jumbo Frames not supported "
3120 "on 82542\n");
3121 return -EINVAL;
3122 } else {
3123 if(max_frame <= E1000_RXBUFFER_2048)
3124 adapter->rx_buffer_len = E1000_RXBUFFER_2048;
3125 else if(max_frame <= E1000_RXBUFFER_4096)
3126 adapter->rx_buffer_len = E1000_RXBUFFER_4096;
3127 else if(max_frame <= E1000_RXBUFFER_8192)
3128 adapter->rx_buffer_len = E1000_RXBUFFER_8192;
3129 else if(max_frame <= E1000_RXBUFFER_16384)
3130 adapter->rx_buffer_len = E1000_RXBUFFER_16384;
3131 }
3132 }
3127 3133
3128 netdev->mtu = new_mtu; 3134 netdev->mtu = new_mtu;
3129 3135
@@ -3620,7 +3626,7 @@ e1000_clean_rx_irq(struct e1000_adapter *adapter,
3620 uint8_t last_byte; 3626 uint8_t last_byte;
3621 unsigned int i; 3627 unsigned int i;
3622 int cleaned_count = 0; 3628 int cleaned_count = 0;
3623 boolean_t cleaned = FALSE, multi_descriptor = FALSE; 3629 boolean_t cleaned = FALSE;
3624 3630
3625 i = rx_ring->next_to_clean; 3631 i = rx_ring->next_to_clean;
3626 rx_desc = E1000_RX_DESC(*rx_ring, i); 3632 rx_desc = E1000_RX_DESC(*rx_ring, i);
@@ -3652,43 +3658,12 @@ e1000_clean_rx_irq(struct e1000_adapter *adapter,
3652 3658
3653 length = le16_to_cpu(rx_desc->length); 3659 length = le16_to_cpu(rx_desc->length);
3654 3660
3655 skb_put(skb, length); 3661 if (unlikely(!(status & E1000_RXD_STAT_EOP))) {
3656 3662 /* All receives must fit into a single buffer */
3657 if (!(status & E1000_RXD_STAT_EOP)) { 3663 E1000_DBG("%s: Receive packet consumed multiple"
3658 if (!rx_ring->rx_skb_top) { 3664 " buffers\n", netdev->name);
3659 rx_ring->rx_skb_top = skb; 3665 dev_kfree_skb_irq(skb);
3660 rx_ring->rx_skb_top->len = length;
3661 rx_ring->rx_skb_prev = skb;
3662 } else {
3663 if (skb_shinfo(rx_ring->rx_skb_top)->frag_list) {
3664 rx_ring->rx_skb_prev->next = skb;
3665 skb->prev = rx_ring->rx_skb_prev;
3666 } else {
3667 skb_shinfo(rx_ring->rx_skb_top)->frag_list = skb;
3668 }
3669 rx_ring->rx_skb_prev = skb;
3670 rx_ring->rx_skb_top->data_len += length;
3671 }
3672 goto next_desc; 3666 goto next_desc;
3673 } else {
3674 if (rx_ring->rx_skb_top) {
3675 if (skb_shinfo(rx_ring->rx_skb_top)
3676 ->frag_list) {
3677 rx_ring->rx_skb_prev->next = skb;
3678 skb->prev = rx_ring->rx_skb_prev;
3679 } else
3680 skb_shinfo(rx_ring->rx_skb_top)
3681 ->frag_list = skb;
3682
3683 rx_ring->rx_skb_top->data_len += length;
3684 rx_ring->rx_skb_top->len +=
3685 rx_ring->rx_skb_top->data_len;
3686
3687 skb = rx_ring->rx_skb_top;
3688 multi_descriptor = TRUE;
3689 rx_ring->rx_skb_top = NULL;
3690 rx_ring->rx_skb_prev = NULL;
3691 }
3692 } 3667 }
3693 3668
3694 if (unlikely(rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK)) { 3669 if (unlikely(rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK)) {
@@ -3712,10 +3687,7 @@ e1000_clean_rx_irq(struct e1000_adapter *adapter,
3712 * performance for small packets with large amounts 3687 * performance for small packets with large amounts
3713 * of reassembly being done in the stack */ 3688 * of reassembly being done in the stack */
3714#define E1000_CB_LENGTH 256 3689#define E1000_CB_LENGTH 256
3715 if ((length < E1000_CB_LENGTH) && 3690 if (length < E1000_CB_LENGTH) {
3716 !rx_ring->rx_skb_top &&
3717 /* or maybe (status & E1000_RXD_STAT_EOP) && */
3718 !multi_descriptor) {
3719 struct sk_buff *new_skb = 3691 struct sk_buff *new_skb =
3720 dev_alloc_skb(length + NET_IP_ALIGN); 3692 dev_alloc_skb(length + NET_IP_ALIGN);
3721 if (new_skb) { 3693 if (new_skb) {
@@ -3729,7 +3701,8 @@ e1000_clean_rx_irq(struct e1000_adapter *adapter,
3729 skb = new_skb; 3701 skb = new_skb;
3730 skb_put(skb, length); 3702 skb_put(skb, length);
3731 } 3703 }
3732 } 3704 } else
3705 skb_put(skb, length);
3733 3706
3734 /* end copybreak code */ 3707 /* end copybreak code */
3735 3708
diff --git a/drivers/net/ifb.c b/drivers/net/ifb.c
index 1b699259b4ec..31fb2d75dc44 100644
--- a/drivers/net/ifb.c
+++ b/drivers/net/ifb.c
@@ -57,7 +57,7 @@ struct ifb_private {
57 struct sk_buff_head tq; 57 struct sk_buff_head tq;
58}; 58};
59 59
60static int numifbs = 1; 60static int numifbs = 2;
61 61
62static void ri_tasklet(unsigned long dev); 62static void ri_tasklet(unsigned long dev);
63static int ifb_xmit(struct sk_buff *skb, struct net_device *dev); 63static int ifb_xmit(struct sk_buff *skb, struct net_device *dev);
diff --git a/drivers/net/pcmcia/axnet_cs.c b/drivers/net/pcmcia/axnet_cs.c
index 01ddfc8cce3f..aa5581369399 100644
--- a/drivers/net/pcmcia/axnet_cs.c
+++ b/drivers/net/pcmcia/axnet_cs.c
@@ -806,6 +806,7 @@ static struct pcmcia_device_id axnet_ids[] = {
806 PCMCIA_DEVICE_MANF_CARD(0x026f, 0x0309), 806 PCMCIA_DEVICE_MANF_CARD(0x026f, 0x0309),
807 PCMCIA_DEVICE_MANF_CARD(0x0274, 0x1106), 807 PCMCIA_DEVICE_MANF_CARD(0x0274, 0x1106),
808 PCMCIA_DEVICE_MANF_CARD(0x8a01, 0xc1ab), 808 PCMCIA_DEVICE_MANF_CARD(0x8a01, 0xc1ab),
809 PCMCIA_DEVICE_PROD_ID12("AmbiCom,Inc.", "Fast Ethernet PC Card(AMB8110)", 0x49b020a7, 0x119cc9fc),
809 PCMCIA_DEVICE_PROD_ID124("Fast Ethernet", "16-bit PC Card", "AX88190", 0xb4be14e3, 0x9a12eb6a, 0xab9be5ef), 810 PCMCIA_DEVICE_PROD_ID124("Fast Ethernet", "16-bit PC Card", "AX88190", 0xb4be14e3, 0x9a12eb6a, 0xab9be5ef),
810 PCMCIA_DEVICE_PROD_ID12("ASIX", "AX88190", 0x0959823b, 0xab9be5ef), 811 PCMCIA_DEVICE_PROD_ID12("ASIX", "AX88190", 0x0959823b, 0xab9be5ef),
811 PCMCIA_DEVICE_PROD_ID12("Billionton", "LNA-100B", 0x552ab682, 0xbc3b87e1), 812 PCMCIA_DEVICE_PROD_ID12("Billionton", "LNA-100B", 0x552ab682, 0xbc3b87e1),
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
index 6e1018448eea..8cc0d0bbdf50 100644
--- a/drivers/net/r8169.c
+++ b/drivers/net/r8169.c
@@ -287,6 +287,20 @@ enum RTL8169_register_content {
287 TxInterFrameGapShift = 24, 287 TxInterFrameGapShift = 24,
288 TxDMAShift = 8, /* DMA burst value (0-7) is shift this many bits */ 288 TxDMAShift = 8, /* DMA burst value (0-7) is shift this many bits */
289 289
290 /* Config1 register p.24 */
291 PMEnable = (1 << 0), /* Power Management Enable */
292
293 /* Config3 register p.25 */
294 MagicPacket = (1 << 5), /* Wake up when receives a Magic Packet */
295 LinkUp = (1 << 4), /* Wake up when the cable connection is re-established */
296
297 /* Config5 register p.27 */
298 BWF = (1 << 6), /* Accept Broadcast wakeup frame */
299 MWF = (1 << 5), /* Accept Multicast wakeup frame */
300 UWF = (1 << 4), /* Accept Unicast wakeup frame */
301 LanWake = (1 << 1), /* LanWake enable/disable */
302 PMEStatus = (1 << 0), /* PME status can be reset by PCI RST# */
303
290 /* TBICSR p.28 */ 304 /* TBICSR p.28 */
291 TBIReset = 0x80000000, 305 TBIReset = 0x80000000,
292 TBILoopback = 0x40000000, 306 TBILoopback = 0x40000000,
@@ -433,6 +447,7 @@ struct rtl8169_private {
433 unsigned int (*phy_reset_pending)(void __iomem *); 447 unsigned int (*phy_reset_pending)(void __iomem *);
434 unsigned int (*link_ok)(void __iomem *); 448 unsigned int (*link_ok)(void __iomem *);
435 struct work_struct task; 449 struct work_struct task;
450 unsigned wol_enabled : 1;
436}; 451};
437 452
438MODULE_AUTHOR("Realtek and the Linux r8169 crew <netdev@vger.kernel.org>"); 453MODULE_AUTHOR("Realtek and the Linux r8169 crew <netdev@vger.kernel.org>");
@@ -607,6 +622,80 @@ static void rtl8169_link_option(int idx, u8 *autoneg, u16 *speed, u8 *duplex)
607 *duplex = p->duplex; 622 *duplex = p->duplex;
608} 623}
609 624
625static void rtl8169_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
626{
627 struct rtl8169_private *tp = netdev_priv(dev);
628 void __iomem *ioaddr = tp->mmio_addr;
629 u8 options;
630
631 wol->wolopts = 0;
632
633#define WAKE_ANY (WAKE_PHY | WAKE_MAGIC | WAKE_UCAST | WAKE_BCAST | WAKE_MCAST)
634 wol->supported = WAKE_ANY;
635
636 spin_lock_irq(&tp->lock);
637
638 options = RTL_R8(Config1);
639 if (!(options & PMEnable))
640 goto out_unlock;
641
642 options = RTL_R8(Config3);
643 if (options & LinkUp)
644 wol->wolopts |= WAKE_PHY;
645 if (options & MagicPacket)
646 wol->wolopts |= WAKE_MAGIC;
647
648 options = RTL_R8(Config5);
649 if (options & UWF)
650 wol->wolopts |= WAKE_UCAST;
651 if (options & BWF)
652 wol->wolopts |= WAKE_BCAST;
653 if (options & MWF)
654 wol->wolopts |= WAKE_MCAST;
655
656out_unlock:
657 spin_unlock_irq(&tp->lock);
658}
659
660static int rtl8169_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
661{
662 struct rtl8169_private *tp = netdev_priv(dev);
663 void __iomem *ioaddr = tp->mmio_addr;
664 int i;
665 static struct {
666 u32 opt;
667 u16 reg;
668 u8 mask;
669 } cfg[] = {
670 { WAKE_ANY, Config1, PMEnable },
671 { WAKE_PHY, Config3, LinkUp },
672 { WAKE_MAGIC, Config3, MagicPacket },
673 { WAKE_UCAST, Config5, UWF },
674 { WAKE_BCAST, Config5, BWF },
675 { WAKE_MCAST, Config5, MWF },
676 { WAKE_ANY, Config5, LanWake }
677 };
678
679 spin_lock_irq(&tp->lock);
680
681 RTL_W8(Cfg9346, Cfg9346_Unlock);
682
683 for (i = 0; i < ARRAY_SIZE(cfg); i++) {
684 u8 options = RTL_R8(cfg[i].reg) & ~cfg[i].mask;
685 if (wol->wolopts & cfg[i].opt)
686 options |= cfg[i].mask;
687 RTL_W8(cfg[i].reg, options);
688 }
689
690 RTL_W8(Cfg9346, Cfg9346_Lock);
691
692 tp->wol_enabled = (wol->wolopts) ? 1 : 0;
693
694 spin_unlock_irq(&tp->lock);
695
696 return 0;
697}
698
610static void rtl8169_get_drvinfo(struct net_device *dev, 699static void rtl8169_get_drvinfo(struct net_device *dev,
611 struct ethtool_drvinfo *info) 700 struct ethtool_drvinfo *info)
612{ 701{
@@ -1025,6 +1114,8 @@ static struct ethtool_ops rtl8169_ethtool_ops = {
1025 .get_tso = ethtool_op_get_tso, 1114 .get_tso = ethtool_op_get_tso,
1026 .set_tso = ethtool_op_set_tso, 1115 .set_tso = ethtool_op_set_tso,
1027 .get_regs = rtl8169_get_regs, 1116 .get_regs = rtl8169_get_regs,
1117 .get_wol = rtl8169_get_wol,
1118 .set_wol = rtl8169_set_wol,
1028 .get_strings = rtl8169_get_strings, 1119 .get_strings = rtl8169_get_strings,
1029 .get_stats_count = rtl8169_get_stats_count, 1120 .get_stats_count = rtl8169_get_stats_count,
1030 .get_ethtool_stats = rtl8169_get_ethtool_stats, 1121 .get_ethtool_stats = rtl8169_get_ethtool_stats,
@@ -1442,6 +1533,11 @@ rtl8169_init_board(struct pci_dev *pdev, struct net_device **dev_out,
1442 } 1533 }
1443 tp->chipset = i; 1534 tp->chipset = i;
1444 1535
1536 RTL_W8(Cfg9346, Cfg9346_Unlock);
1537 RTL_W8(Config1, RTL_R8(Config1) | PMEnable);
1538 RTL_W8(Config5, RTL_R8(Config5) & PMEStatus);
1539 RTL_W8(Cfg9346, Cfg9346_Lock);
1540
1445 *ioaddr_out = ioaddr; 1541 *ioaddr_out = ioaddr;
1446 *dev_out = dev; 1542 *dev_out = dev;
1447out: 1543out:
@@ -1612,49 +1708,6 @@ rtl8169_remove_one(struct pci_dev *pdev)
1612 pci_set_drvdata(pdev, NULL); 1708 pci_set_drvdata(pdev, NULL);
1613} 1709}
1614 1710
1615#ifdef CONFIG_PM
1616
1617static int rtl8169_suspend(struct pci_dev *pdev, pm_message_t state)
1618{
1619 struct net_device *dev = pci_get_drvdata(pdev);
1620 struct rtl8169_private *tp = netdev_priv(dev);
1621 void __iomem *ioaddr = tp->mmio_addr;
1622 unsigned long flags;
1623
1624 if (!netif_running(dev))
1625 return 0;
1626
1627 netif_device_detach(dev);
1628 netif_stop_queue(dev);
1629 spin_lock_irqsave(&tp->lock, flags);
1630
1631 /* Disable interrupts, stop Rx and Tx */
1632 RTL_W16(IntrMask, 0);
1633 RTL_W8(ChipCmd, 0);
1634
1635 /* Update the error counts. */
1636 tp->stats.rx_missed_errors += RTL_R32(RxMissed);
1637 RTL_W32(RxMissed, 0);
1638 spin_unlock_irqrestore(&tp->lock, flags);
1639
1640 return 0;
1641}
1642
1643static int rtl8169_resume(struct pci_dev *pdev)
1644{
1645 struct net_device *dev = pci_get_drvdata(pdev);
1646
1647 if (!netif_running(dev))
1648 return 0;
1649
1650 netif_device_attach(dev);
1651 rtl8169_hw_start(dev);
1652
1653 return 0;
1654}
1655
1656#endif /* CONFIG_PM */
1657
1658static void rtl8169_set_rxbufsize(struct rtl8169_private *tp, 1711static void rtl8169_set_rxbufsize(struct rtl8169_private *tp,
1659 struct net_device *dev) 1712 struct net_device *dev)
1660{ 1713{
@@ -2700,6 +2753,56 @@ static struct net_device_stats *rtl8169_get_stats(struct net_device *dev)
2700 return &tp->stats; 2753 return &tp->stats;
2701} 2754}
2702 2755
2756#ifdef CONFIG_PM
2757
2758static int rtl8169_suspend(struct pci_dev *pdev, pm_message_t state)
2759{
2760 struct net_device *dev = pci_get_drvdata(pdev);
2761 struct rtl8169_private *tp = netdev_priv(dev);
2762 void __iomem *ioaddr = tp->mmio_addr;
2763
2764 if (!netif_running(dev))
2765 goto out;
2766
2767 netif_device_detach(dev);
2768 netif_stop_queue(dev);
2769
2770 spin_lock_irq(&tp->lock);
2771
2772 rtl8169_asic_down(ioaddr);
2773
2774 tp->stats.rx_missed_errors += RTL_R32(RxMissed);
2775 RTL_W32(RxMissed, 0);
2776
2777 spin_unlock_irq(&tp->lock);
2778
2779 pci_save_state(pdev);
2780 pci_enable_wake(pdev, pci_choose_state(pdev, state), tp->wol_enabled);
2781 pci_set_power_state(pdev, pci_choose_state(pdev, state));
2782out:
2783 return 0;
2784}
2785
2786static int rtl8169_resume(struct pci_dev *pdev)
2787{
2788 struct net_device *dev = pci_get_drvdata(pdev);
2789
2790 if (!netif_running(dev))
2791 goto out;
2792
2793 netif_device_attach(dev);
2794
2795 pci_set_power_state(pdev, PCI_D0);
2796 pci_restore_state(pdev);
2797 pci_enable_wake(pdev, PCI_D0, 0);
2798
2799 rtl8169_schedule_work(dev, rtl8169_reset_task);
2800out:
2801 return 0;
2802}
2803
2804#endif /* CONFIG_PM */
2805
2703static struct pci_driver rtl8169_pci_driver = { 2806static struct pci_driver rtl8169_pci_driver = {
2704 .name = MODULENAME, 2807 .name = MODULENAME,
2705 .id_table = rtl8169_pci_tbl, 2808 .id_table = rtl8169_pci_tbl,
diff --git a/drivers/net/sis900.c b/drivers/net/sis900.c
index 3d95fa20cd88..7a952fe60be2 100644
--- a/drivers/net/sis900.c
+++ b/drivers/net/sis900.c
@@ -540,7 +540,7 @@ static int __devinit sis900_probe(struct pci_dev *pci_dev,
540 printk("%2.2x.\n", net_dev->dev_addr[i]); 540 printk("%2.2x.\n", net_dev->dev_addr[i]);
541 541
542 /* Detect Wake on Lan support */ 542 /* Detect Wake on Lan support */
543 ret = inl(CFGPMC & PMESP); 543 ret = (inl(net_dev->base_addr + CFGPMC) & PMESP) >> 27;
544 if (netif_msg_probe(sis_priv) && (ret & PME_D3C) == 0) 544 if (netif_msg_probe(sis_priv) && (ret & PME_D3C) == 0)
545 printk(KERN_INFO "%s: Wake on LAN only available from suspend to RAM.", net_dev->name); 545 printk(KERN_INFO "%s: Wake on LAN only available from suspend to RAM.", net_dev->name);
546 546
@@ -2040,7 +2040,7 @@ static int sis900_set_wol(struct net_device *net_dev, struct ethtool_wolinfo *wo
2040 2040
2041 if (wol->wolopts == 0) { 2041 if (wol->wolopts == 0) {
2042 pci_read_config_dword(sis_priv->pci_dev, CFGPMCSR, &cfgpmcsr); 2042 pci_read_config_dword(sis_priv->pci_dev, CFGPMCSR, &cfgpmcsr);
2043 cfgpmcsr |= ~PME_EN; 2043 cfgpmcsr &= ~PME_EN;
2044 pci_write_config_dword(sis_priv->pci_dev, CFGPMCSR, cfgpmcsr); 2044 pci_write_config_dword(sis_priv->pci_dev, CFGPMCSR, cfgpmcsr);
2045 outl(pmctrl_bits, pmctrl_addr); 2045 outl(pmctrl_bits, pmctrl_addr);
2046 if (netif_msg_wol(sis_priv)) 2046 if (netif_msg_wol(sis_priv))
diff --git a/drivers/net/skge.c b/drivers/net/skge.c
index 67fb19b8fde9..25e028b7ce48 100644
--- a/drivers/net/skge.c
+++ b/drivers/net/skge.c
@@ -879,13 +879,12 @@ static int __xm_phy_read(struct skge_hw *hw, int port, u16 reg, u16 *val)
879 int i; 879 int i;
880 880
881 xm_write16(hw, port, XM_PHY_ADDR, reg | hw->phy_addr); 881 xm_write16(hw, port, XM_PHY_ADDR, reg | hw->phy_addr);
882 xm_read16(hw, port, XM_PHY_DATA); 882 *val = xm_read16(hw, port, XM_PHY_DATA);
883 883
884 /* Need to wait for external PHY */
885 for (i = 0; i < PHY_RETRIES; i++) { 884 for (i = 0; i < PHY_RETRIES; i++) {
886 udelay(1);
887 if (xm_read16(hw, port, XM_MMU_CMD) & XM_MMU_PHY_RDY) 885 if (xm_read16(hw, port, XM_MMU_CMD) & XM_MMU_PHY_RDY)
888 goto ready; 886 goto ready;
887 udelay(1);
889 } 888 }
890 889
891 return -ETIMEDOUT; 890 return -ETIMEDOUT;
@@ -918,7 +917,12 @@ static int xm_phy_write(struct skge_hw *hw, int port, u16 reg, u16 val)
918 917
919 ready: 918 ready:
920 xm_write16(hw, port, XM_PHY_DATA, val); 919 xm_write16(hw, port, XM_PHY_DATA, val);
921 return 0; 920 for (i = 0; i < PHY_RETRIES; i++) {
921 if (!(xm_read16(hw, port, XM_MMU_CMD) & XM_MMU_PHY_BUSY))
922 return 0;
923 udelay(1);
924 }
925 return -ETIMEDOUT;
922} 926}
923 927
924static void genesis_init(struct skge_hw *hw) 928static void genesis_init(struct skge_hw *hw)
@@ -1168,13 +1172,17 @@ static void genesis_mac_init(struct skge_hw *hw, int port)
1168 u32 r; 1172 u32 r;
1169 const u8 zero[6] = { 0 }; 1173 const u8 zero[6] = { 0 };
1170 1174
1171 /* Clear MIB counters */ 1175 for (i = 0; i < 10; i++) {
1172 xm_write16(hw, port, XM_STAT_CMD, 1176 skge_write16(hw, SK_REG(port, TX_MFF_CTRL1),
1173 XM_SC_CLR_RXC | XM_SC_CLR_TXC); 1177 MFF_SET_MAC_RST);
1174 /* Clear two times according to Errata #3 */ 1178 if (skge_read16(hw, SK_REG(port, TX_MFF_CTRL1)) & MFF_SET_MAC_RST)
1175 xm_write16(hw, port, XM_STAT_CMD, 1179 goto reset_ok;
1176 XM_SC_CLR_RXC | XM_SC_CLR_TXC); 1180 udelay(1);
1181 }
1182
1183 printk(KERN_WARNING PFX "%s: genesis reset failed\n", dev->name);
1177 1184
1185 reset_ok:
1178 /* Unreset the XMAC. */ 1186 /* Unreset the XMAC. */
1179 skge_write16(hw, SK_REG(port, TX_MFF_CTRL1), MFF_CLR_MAC_RST); 1187 skge_write16(hw, SK_REG(port, TX_MFF_CTRL1), MFF_CLR_MAC_RST);
1180 1188
@@ -1191,7 +1199,7 @@ static void genesis_mac_init(struct skge_hw *hw, int port)
1191 r |= GP_DIR_2|GP_IO_2; 1199 r |= GP_DIR_2|GP_IO_2;
1192 1200
1193 skge_write32(hw, B2_GP_IO, r); 1201 skge_write32(hw, B2_GP_IO, r);
1194 skge_read32(hw, B2_GP_IO); 1202
1195 1203
1196 /* Enable GMII interface */ 1204 /* Enable GMII interface */
1197 xm_write16(hw, port, XM_HW_CFG, XM_HW_GMII_MD); 1205 xm_write16(hw, port, XM_HW_CFG, XM_HW_GMII_MD);
@@ -1205,6 +1213,13 @@ static void genesis_mac_init(struct skge_hw *hw, int port)
1205 for (i = 1; i < 16; i++) 1213 for (i = 1; i < 16; i++)
1206 xm_outaddr(hw, port, XM_EXM(i), zero); 1214 xm_outaddr(hw, port, XM_EXM(i), zero);
1207 1215
1216 /* Clear MIB counters */
1217 xm_write16(hw, port, XM_STAT_CMD,
1218 XM_SC_CLR_RXC | XM_SC_CLR_TXC);
1219 /* Clear two times according to Errata #3 */
1220 xm_write16(hw, port, XM_STAT_CMD,
1221 XM_SC_CLR_RXC | XM_SC_CLR_TXC);
1222
1208 /* configure Rx High Water Mark (XM_RX_HI_WM) */ 1223 /* configure Rx High Water Mark (XM_RX_HI_WM) */
1209 xm_write16(hw, port, XM_RX_HI_WM, 1450); 1224 xm_write16(hw, port, XM_RX_HI_WM, 1450);
1210 1225
@@ -2170,8 +2185,10 @@ static int skge_up(struct net_device *dev)
2170 skge->tx_avail = skge->tx_ring.count - 1; 2185 skge->tx_avail = skge->tx_ring.count - 1;
2171 2186
2172 /* Enable IRQ from port */ 2187 /* Enable IRQ from port */
2188 spin_lock_irq(&hw->hw_lock);
2173 hw->intr_mask |= portirqmask[port]; 2189 hw->intr_mask |= portirqmask[port];
2174 skge_write32(hw, B0_IMSK, hw->intr_mask); 2190 skge_write32(hw, B0_IMSK, hw->intr_mask);
2191 spin_unlock_irq(&hw->hw_lock);
2175 2192
2176 /* Initialize MAC */ 2193 /* Initialize MAC */
2177 spin_lock_bh(&hw->phy_lock); 2194 spin_lock_bh(&hw->phy_lock);
@@ -2229,8 +2246,10 @@ static int skge_down(struct net_device *dev)
2229 else 2246 else
2230 yukon_stop(skge); 2247 yukon_stop(skge);
2231 2248
2249 spin_lock_irq(&hw->hw_lock);
2232 hw->intr_mask &= ~portirqmask[skge->port]; 2250 hw->intr_mask &= ~portirqmask[skge->port];
2233 skge_write32(hw, B0_IMSK, hw->intr_mask); 2251 skge_write32(hw, B0_IMSK, hw->intr_mask);
2252 spin_unlock_irq(&hw->hw_lock);
2234 2253
2235 /* Stop transmitter */ 2254 /* Stop transmitter */
2236 skge_write8(hw, Q_ADDR(txqaddr[port], Q_CSR), CSR_STOP); 2255 skge_write8(hw, Q_ADDR(txqaddr[port], Q_CSR), CSR_STOP);
@@ -2678,8 +2697,7 @@ static int skge_poll(struct net_device *dev, int *budget)
2678 2697
2679 /* restart receiver */ 2698 /* restart receiver */
2680 wmb(); 2699 wmb();
2681 skge_write8(hw, Q_ADDR(rxqaddr[skge->port], Q_CSR), 2700 skge_write8(hw, Q_ADDR(rxqaddr[skge->port], Q_CSR), CSR_START);
2682 CSR_START | CSR_IRQ_CL_F);
2683 2701
2684 *budget -= work_done; 2702 *budget -= work_done;
2685 dev->quota -= work_done; 2703 dev->quota -= work_done;
@@ -2687,10 +2705,11 @@ static int skge_poll(struct net_device *dev, int *budget)
2687 if (work_done >= to_do) 2705 if (work_done >= to_do)
2688 return 1; /* not done */ 2706 return 1; /* not done */
2689 2707
2690 netif_rx_complete(dev); 2708 spin_lock_irq(&hw->hw_lock);
2691 hw->intr_mask |= portirqmask[skge->port]; 2709 __netif_rx_complete(dev);
2692 skge_write32(hw, B0_IMSK, hw->intr_mask); 2710 hw->intr_mask |= portirqmask[skge->port];
2693 skge_read32(hw, B0_IMSK); 2711 skge_write32(hw, B0_IMSK, hw->intr_mask);
2712 spin_unlock_irq(&hw->hw_lock);
2694 2713
2695 return 0; 2714 return 0;
2696} 2715}
@@ -2850,18 +2869,10 @@ static void skge_extirq(unsigned long data)
2850 } 2869 }
2851 spin_unlock(&hw->phy_lock); 2870 spin_unlock(&hw->phy_lock);
2852 2871
2853 local_irq_disable(); 2872 spin_lock_irq(&hw->hw_lock);
2854 hw->intr_mask |= IS_EXT_REG; 2873 hw->intr_mask |= IS_EXT_REG;
2855 skge_write32(hw, B0_IMSK, hw->intr_mask); 2874 skge_write32(hw, B0_IMSK, hw->intr_mask);
2856 local_irq_enable(); 2875 spin_unlock_irq(&hw->hw_lock);
2857}
2858
2859static inline void skge_wakeup(struct net_device *dev)
2860{
2861 struct skge_port *skge = netdev_priv(dev);
2862
2863 prefetch(skge->rx_ring.to_clean);
2864 netif_rx_schedule(dev);
2865} 2876}
2866 2877
2867static irqreturn_t skge_intr(int irq, void *dev_id, struct pt_regs *regs) 2878static irqreturn_t skge_intr(int irq, void *dev_id, struct pt_regs *regs)
@@ -2872,15 +2883,17 @@ static irqreturn_t skge_intr(int irq, void *dev_id, struct pt_regs *regs)
2872 if (status == 0 || status == ~0) /* hotplug or shared irq */ 2883 if (status == 0 || status == ~0) /* hotplug or shared irq */
2873 return IRQ_NONE; 2884 return IRQ_NONE;
2874 2885
2875 status &= hw->intr_mask; 2886 spin_lock(&hw->hw_lock);
2876 if (status & IS_R1_F) { 2887 if (status & IS_R1_F) {
2888 skge_write8(hw, Q_ADDR(Q_R1, Q_CSR), CSR_IRQ_CL_F);
2877 hw->intr_mask &= ~IS_R1_F; 2889 hw->intr_mask &= ~IS_R1_F;
2878 skge_wakeup(hw->dev[0]); 2890 netif_rx_schedule(hw->dev[0]);
2879 } 2891 }
2880 2892
2881 if (status & IS_R2_F) { 2893 if (status & IS_R2_F) {
2894 skge_write8(hw, Q_ADDR(Q_R2, Q_CSR), CSR_IRQ_CL_F);
2882 hw->intr_mask &= ~IS_R2_F; 2895 hw->intr_mask &= ~IS_R2_F;
2883 skge_wakeup(hw->dev[1]); 2896 netif_rx_schedule(hw->dev[1]);
2884 } 2897 }
2885 2898
2886 if (status & IS_XA1_F) 2899 if (status & IS_XA1_F)
@@ -2922,6 +2935,7 @@ static irqreturn_t skge_intr(int irq, void *dev_id, struct pt_regs *regs)
2922 } 2935 }
2923 2936
2924 skge_write32(hw, B0_IMSK, hw->intr_mask); 2937 skge_write32(hw, B0_IMSK, hw->intr_mask);
2938 spin_unlock(&hw->hw_lock);
2925 2939
2926 return IRQ_HANDLED; 2940 return IRQ_HANDLED;
2927} 2941}
@@ -3290,6 +3304,7 @@ static int __devinit skge_probe(struct pci_dev *pdev,
3290 3304
3291 hw->pdev = pdev; 3305 hw->pdev = pdev;
3292 spin_lock_init(&hw->phy_lock); 3306 spin_lock_init(&hw->phy_lock);
3307 spin_lock_init(&hw->hw_lock);
3293 tasklet_init(&hw->ext_tasklet, skge_extirq, (unsigned long) hw); 3308 tasklet_init(&hw->ext_tasklet, skge_extirq, (unsigned long) hw);
3294 3309
3295 hw->regs = ioremap_nocache(pci_resource_start(pdev, 0), 0x4000); 3310 hw->regs = ioremap_nocache(pci_resource_start(pdev, 0), 0x4000);
diff --git a/drivers/net/skge.h b/drivers/net/skge.h
index 2efdacc290e5..941f12a333b6 100644
--- a/drivers/net/skge.h
+++ b/drivers/net/skge.h
@@ -2402,6 +2402,7 @@ struct skge_hw {
2402 2402
2403 struct tasklet_struct ext_tasklet; 2403 struct tasklet_struct ext_tasklet;
2404 spinlock_t phy_lock; 2404 spinlock_t phy_lock;
2405 spinlock_t hw_lock;
2405}; 2406};
2406 2407
2407enum { 2408enum {
diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c
index bfeba5b9cd7a..72c1630977d6 100644
--- a/drivers/net/sky2.c
+++ b/drivers/net/sky2.c
@@ -96,10 +96,6 @@ static int copybreak __read_mostly = 256;
96module_param(copybreak, int, 0); 96module_param(copybreak, int, 0);
97MODULE_PARM_DESC(copybreak, "Receive copy threshold"); 97MODULE_PARM_DESC(copybreak, "Receive copy threshold");
98 98
99static int disable_msi = 0;
100module_param(disable_msi, int, 0);
101MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
102
103static const struct pci_device_id sky2_id_table[] = { 99static const struct pci_device_id sky2_id_table[] = {
104 { PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, 0x9000) }, 100 { PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, 0x9000) },
105 { PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, 0x9E00) }, 101 { PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, 0x9E00) },
@@ -195,11 +191,11 @@ static int sky2_set_power_state(struct sky2_hw *hw, pci_power_t state)
195 pr_debug("sky2_set_power_state %d\n", state); 191 pr_debug("sky2_set_power_state %d\n", state);
196 sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON); 192 sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
197 193
198 pci_read_config_word(hw->pdev, hw->pm_cap + PCI_PM_PMC, &power_control); 194 power_control = sky2_pci_read16(hw, hw->pm_cap + PCI_PM_PMC);
199 vaux = (sky2_read16(hw, B0_CTST) & Y2_VAUX_AVAIL) && 195 vaux = (sky2_read16(hw, B0_CTST) & Y2_VAUX_AVAIL) &&
200 (power_control & PCI_PM_CAP_PME_D3cold); 196 (power_control & PCI_PM_CAP_PME_D3cold);
201 197
202 pci_read_config_word(hw->pdev, hw->pm_cap + PCI_PM_CTRL, &power_control); 198 power_control = sky2_pci_read16(hw, hw->pm_cap + PCI_PM_CTRL);
203 199
204 power_control |= PCI_PM_CTRL_PME_STATUS; 200 power_control |= PCI_PM_CTRL_PME_STATUS;
205 power_control &= ~(PCI_PM_CTRL_STATE_MASK); 201 power_control &= ~(PCI_PM_CTRL_STATE_MASK);
@@ -223,7 +219,7 @@ static int sky2_set_power_state(struct sky2_hw *hw, pci_power_t state)
223 sky2_write8(hw, B2_Y2_CLK_GATE, 0); 219 sky2_write8(hw, B2_Y2_CLK_GATE, 0);
224 220
225 /* Turn off phy power saving */ 221 /* Turn off phy power saving */
226 pci_read_config_dword(hw->pdev, PCI_DEV_REG1, &reg1); 222 reg1 = sky2_pci_read32(hw, PCI_DEV_REG1);
227 reg1 &= ~(PCI_Y2_PHY1_POWD | PCI_Y2_PHY2_POWD); 223 reg1 &= ~(PCI_Y2_PHY1_POWD | PCI_Y2_PHY2_POWD);
228 224
229 /* looks like this XL is back asswards .. */ 225 /* looks like this XL is back asswards .. */
@@ -232,18 +228,28 @@ static int sky2_set_power_state(struct sky2_hw *hw, pci_power_t state)
232 if (hw->ports > 1) 228 if (hw->ports > 1)
233 reg1 |= PCI_Y2_PHY2_COMA; 229 reg1 |= PCI_Y2_PHY2_COMA;
234 } 230 }
235 pci_write_config_dword(hw->pdev, PCI_DEV_REG1, reg1); 231
232 if (hw->chip_id == CHIP_ID_YUKON_EC_U) {
233 sky2_pci_write32(hw, PCI_DEV_REG3, 0);
234 reg1 = sky2_pci_read32(hw, PCI_DEV_REG4);
235 reg1 &= P_ASPM_CONTROL_MSK;
236 sky2_pci_write32(hw, PCI_DEV_REG4, reg1);
237 sky2_pci_write32(hw, PCI_DEV_REG5, 0);
238 }
239
240 sky2_pci_write32(hw, PCI_DEV_REG1, reg1);
241
236 break; 242 break;
237 243
238 case PCI_D3hot: 244 case PCI_D3hot:
239 case PCI_D3cold: 245 case PCI_D3cold:
240 /* Turn on phy power saving */ 246 /* Turn on phy power saving */
241 pci_read_config_dword(hw->pdev, PCI_DEV_REG1, &reg1); 247 reg1 = sky2_pci_read32(hw, PCI_DEV_REG1);
242 if (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev > 1) 248 if (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev > 1)
243 reg1 &= ~(PCI_Y2_PHY1_POWD | PCI_Y2_PHY2_POWD); 249 reg1 &= ~(PCI_Y2_PHY1_POWD | PCI_Y2_PHY2_POWD);
244 else 250 else
245 reg1 |= (PCI_Y2_PHY1_POWD | PCI_Y2_PHY2_POWD); 251 reg1 |= (PCI_Y2_PHY1_POWD | PCI_Y2_PHY2_POWD);
246 pci_write_config_dword(hw->pdev, PCI_DEV_REG1, reg1); 252 sky2_pci_write32(hw, PCI_DEV_REG1, reg1);
247 253
248 if (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev > 1) 254 if (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev > 1)
249 sky2_write8(hw, B2_Y2_CLK_GATE, 0); 255 sky2_write8(hw, B2_Y2_CLK_GATE, 0);
@@ -265,7 +271,7 @@ static int sky2_set_power_state(struct sky2_hw *hw, pci_power_t state)
265 ret = -1; 271 ret = -1;
266 } 272 }
267 273
268 pci_write_config_byte(hw->pdev, hw->pm_cap + PCI_PM_CTRL, power_control); 274 sky2_pci_write16(hw, hw->pm_cap + PCI_PM_CTRL, power_control);
269 sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF); 275 sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
270 return ret; 276 return ret;
271} 277}
@@ -463,16 +469,31 @@ static void sky2_phy_init(struct sky2_hw *hw, unsigned port)
463 ledover |= PHY_M_LED_MO_RX(MO_LED_OFF); 469 ledover |= PHY_M_LED_MO_RX(MO_LED_OFF);
464 } 470 }
465 471
466 gm_phy_write(hw, port, PHY_MARV_LED_CTRL, ledctrl); 472 if (hw->chip_id == CHIP_ID_YUKON_EC_U && hw->chip_rev >= 2) {
473 /* apply fixes in PHY AFE */
474 gm_phy_write(hw, port, 22, 255);
475 /* increase differential signal amplitude in 10BASE-T */
476 gm_phy_write(hw, port, 24, 0xaa99);
477 gm_phy_write(hw, port, 23, 0x2011);
467 478
468 if (sky2->autoneg == AUTONEG_DISABLE || sky2->speed == SPEED_100) { 479 /* fix for IEEE A/B Symmetry failure in 1000BASE-T */
469 /* turn on 100 Mbps LED (LED_LINK100) */ 480 gm_phy_write(hw, port, 24, 0xa204);
470 ledover |= PHY_M_LED_MO_100(MO_LED_ON); 481 gm_phy_write(hw, port, 23, 0x2002);
471 }
472 482
473 if (ledover) 483 /* set page register to 0 */
474 gm_phy_write(hw, port, PHY_MARV_LED_OVER, ledover); 484 gm_phy_write(hw, port, 22, 0);
485 } else {
486 gm_phy_write(hw, port, PHY_MARV_LED_CTRL, ledctrl);
487
488 if (sky2->autoneg == AUTONEG_DISABLE || sky2->speed == SPEED_100) {
489 /* turn on 100 Mbps LED (LED_LINK100) */
490 ledover |= PHY_M_LED_MO_100(MO_LED_ON);
491 }
492
493 if (ledover)
494 gm_phy_write(hw, port, PHY_MARV_LED_OVER, ledover);
475 495
496 }
476 /* Enable phy interrupt on auto-negotiation complete (or link up) */ 497 /* Enable phy interrupt on auto-negotiation complete (or link up) */
477 if (sky2->autoneg == AUTONEG_ENABLE) 498 if (sky2->autoneg == AUTONEG_ENABLE)
478 gm_phy_write(hw, port, PHY_MARV_INT_MASK, PHY_M_IS_AN_COMPL); 499 gm_phy_write(hw, port, PHY_MARV_INT_MASK, PHY_M_IS_AN_COMPL);
@@ -953,6 +974,12 @@ static int sky2_rx_start(struct sky2_port *sky2)
953 974
954 sky2->rx_put = sky2->rx_next = 0; 975 sky2->rx_put = sky2->rx_next = 0;
955 sky2_qset(hw, rxq); 976 sky2_qset(hw, rxq);
977
978 if (hw->chip_id == CHIP_ID_YUKON_EC_U && hw->chip_rev >= 2) {
979 /* MAC Rx RAM Read is controlled by hardware */
980 sky2_write32(hw, Q_ADDR(rxq, Q_F), F_M_RX_RAM_DIS);
981 }
982
956 sky2_prefetch_init(hw, rxq, sky2->rx_le_map, RX_LE_SIZE - 1); 983 sky2_prefetch_init(hw, rxq, sky2->rx_le_map, RX_LE_SIZE - 1);
957 984
958 rx_set_checksum(sky2); 985 rx_set_checksum(sky2);
@@ -1035,9 +1062,10 @@ static int sky2_up(struct net_device *dev)
1035 RB_RST_SET); 1062 RB_RST_SET);
1036 1063
1037 sky2_qset(hw, txqaddr[port]); 1064 sky2_qset(hw, txqaddr[port]);
1038 if (hw->chip_id == CHIP_ID_YUKON_EC_U)
1039 sky2_write16(hw, Q_ADDR(txqaddr[port], Q_AL), 0x1a0);
1040 1065
1066 /* Set almost empty threshold */
1067 if (hw->chip_id == CHIP_ID_YUKON_EC_U && hw->chip_rev == 1)
1068 sky2_write16(hw, Q_ADDR(txqaddr[port], Q_AL), 0x1a0);
1041 1069
1042 sky2_prefetch_init(hw, txqaddr[port], sky2->tx_le_map, 1070 sky2_prefetch_init(hw, txqaddr[port], sky2->tx_le_map,
1043 TX_RING_SIZE - 1); 1071 TX_RING_SIZE - 1);
@@ -1047,8 +1075,10 @@ static int sky2_up(struct net_device *dev)
1047 goto err_out; 1075 goto err_out;
1048 1076
1049 /* Enable interrupts from phy/mac for port */ 1077 /* Enable interrupts from phy/mac for port */
1078 spin_lock_irq(&hw->hw_lock);
1050 hw->intr_mask |= (port == 0) ? Y2_IS_PORT_1 : Y2_IS_PORT_2; 1079 hw->intr_mask |= (port == 0) ? Y2_IS_PORT_1 : Y2_IS_PORT_2;
1051 sky2_write32(hw, B0_IMSK, hw->intr_mask); 1080 sky2_write32(hw, B0_IMSK, hw->intr_mask);
1081 spin_unlock_irq(&hw->hw_lock);
1052 return 0; 1082 return 0;
1053 1083
1054err_out: 1084err_out:
@@ -1348,10 +1378,10 @@ static int sky2_down(struct net_device *dev)
1348 netif_stop_queue(dev); 1378 netif_stop_queue(dev);
1349 1379
1350 /* Disable port IRQ */ 1380 /* Disable port IRQ */
1351 local_irq_disable(); 1381 spin_lock_irq(&hw->hw_lock);
1352 hw->intr_mask &= ~((sky2->port == 0) ? Y2_IS_IRQ_PHY1 : Y2_IS_IRQ_PHY2); 1382 hw->intr_mask &= ~((sky2->port == 0) ? Y2_IS_IRQ_PHY1 : Y2_IS_IRQ_PHY2);
1353 sky2_write32(hw, B0_IMSK, hw->intr_mask); 1383 sky2_write32(hw, B0_IMSK, hw->intr_mask);
1354 local_irq_enable(); 1384 spin_unlock_irq(&hw->hw_lock);
1355 1385
1356 flush_scheduled_work(); 1386 flush_scheduled_work();
1357 1387
@@ -1633,10 +1663,10 @@ static void sky2_phy_task(void *arg)
1633out: 1663out:
1634 up(&sky2->phy_sema); 1664 up(&sky2->phy_sema);
1635 1665
1636 local_irq_disable(); 1666 spin_lock_irq(&hw->hw_lock);
1637 hw->intr_mask |= (sky2->port == 0) ? Y2_IS_IRQ_PHY1 : Y2_IS_IRQ_PHY2; 1667 hw->intr_mask |= (sky2->port == 0) ? Y2_IS_IRQ_PHY1 : Y2_IS_IRQ_PHY2;
1638 sky2_write32(hw, B0_IMSK, hw->intr_mask); 1668 sky2_write32(hw, B0_IMSK, hw->intr_mask);
1639 local_irq_enable(); 1669 spin_unlock_irq(&hw->hw_lock);
1640} 1670}
1641 1671
1642 1672
@@ -1863,6 +1893,17 @@ static int sky2_poll(struct net_device *dev0, int *budget)
1863 1893
1864 sky2_write32(hw, STAT_CTRL, SC_STAT_CLR_IRQ); 1894 sky2_write32(hw, STAT_CTRL, SC_STAT_CLR_IRQ);
1865 1895
1896 /*
1897 * Kick the STAT_LEV_TIMER_CTRL timer.
1898 * This fixes my hangs on Yukon-EC (0xb6) rev 1.
1899 * The if clause is there to start the timer only if it has been
1900 * configured correctly and not been disabled via ethtool.
1901 */
1902 if (sky2_read8(hw, STAT_LEV_TIMER_CTRL) == TIM_START) {
1903 sky2_write8(hw, STAT_LEV_TIMER_CTRL, TIM_STOP);
1904 sky2_write8(hw, STAT_LEV_TIMER_CTRL, TIM_START);
1905 }
1906
1866 hwidx = sky2_read16(hw, STAT_PUT_IDX); 1907 hwidx = sky2_read16(hw, STAT_PUT_IDX);
1867 BUG_ON(hwidx >= STATUS_RING_SIZE); 1908 BUG_ON(hwidx >= STATUS_RING_SIZE);
1868 rmb(); 1909 rmb();
@@ -1945,16 +1986,19 @@ exit_loop:
1945 sky2_tx_check(hw, 0, tx_done[0]); 1986 sky2_tx_check(hw, 0, tx_done[0]);
1946 sky2_tx_check(hw, 1, tx_done[1]); 1987 sky2_tx_check(hw, 1, tx_done[1]);
1947 1988
1989 if (sky2_read8(hw, STAT_TX_TIMER_CTRL) == TIM_START) {
1990 sky2_write8(hw, STAT_TX_TIMER_CTRL, TIM_STOP);
1991 sky2_write8(hw, STAT_TX_TIMER_CTRL, TIM_START);
1992 }
1993
1948 if (likely(work_done < to_do)) { 1994 if (likely(work_done < to_do)) {
1949 /* need to restart TX timer */ 1995 spin_lock_irq(&hw->hw_lock);
1950 if (is_ec_a1(hw)) { 1996 __netif_rx_complete(dev0);
1951 sky2_write8(hw, STAT_TX_TIMER_CTRL, TIM_STOP);
1952 sky2_write8(hw, STAT_TX_TIMER_CTRL, TIM_START);
1953 }
1954 1997
1955 netif_rx_complete(dev0);
1956 hw->intr_mask |= Y2_IS_STAT_BMU; 1998 hw->intr_mask |= Y2_IS_STAT_BMU;
1957 sky2_write32(hw, B0_IMSK, hw->intr_mask); 1999 sky2_write32(hw, B0_IMSK, hw->intr_mask);
2000 spin_unlock_irq(&hw->hw_lock);
2001
1958 return 0; 2002 return 0;
1959 } else { 2003 } else {
1960 *budget -= work_done; 2004 *budget -= work_done;
@@ -2017,13 +2061,13 @@ static void sky2_hw_intr(struct sky2_hw *hw)
2017 if (status & (Y2_IS_MST_ERR | Y2_IS_IRQ_STAT)) { 2061 if (status & (Y2_IS_MST_ERR | Y2_IS_IRQ_STAT)) {
2018 u16 pci_err; 2062 u16 pci_err;
2019 2063
2020 pci_read_config_word(hw->pdev, PCI_STATUS, &pci_err); 2064 pci_err = sky2_pci_read16(hw, PCI_STATUS);
2021 if (net_ratelimit()) 2065 if (net_ratelimit())
2022 printk(KERN_ERR PFX "%s: pci hw error (0x%x)\n", 2066 printk(KERN_ERR PFX "%s: pci hw error (0x%x)\n",
2023 pci_name(hw->pdev), pci_err); 2067 pci_name(hw->pdev), pci_err);
2024 2068
2025 sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON); 2069 sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
2026 pci_write_config_word(hw->pdev, PCI_STATUS, 2070 sky2_pci_write16(hw, PCI_STATUS,
2027 pci_err | PCI_STATUS_ERROR_BITS); 2071 pci_err | PCI_STATUS_ERROR_BITS);
2028 sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF); 2072 sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
2029 } 2073 }
@@ -2032,7 +2076,7 @@ static void sky2_hw_intr(struct sky2_hw *hw)
2032 /* PCI-Express uncorrectable Error occurred */ 2076 /* PCI-Express uncorrectable Error occurred */
2033 u32 pex_err; 2077 u32 pex_err;
2034 2078
2035 pci_read_config_dword(hw->pdev, PEX_UNC_ERR_STAT, &pex_err); 2079 pex_err = sky2_pci_read32(hw, PEX_UNC_ERR_STAT);
2036 2080
2037 if (net_ratelimit()) 2081 if (net_ratelimit())
2038 printk(KERN_ERR PFX "%s: pci express error (0x%x)\n", 2082 printk(KERN_ERR PFX "%s: pci express error (0x%x)\n",
@@ -2040,7 +2084,7 @@ static void sky2_hw_intr(struct sky2_hw *hw)
2040 2084
2041 /* clear the interrupt */ 2085 /* clear the interrupt */
2042 sky2_write32(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON); 2086 sky2_write32(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
2043 pci_write_config_dword(hw->pdev, PEX_UNC_ERR_STAT, 2087 sky2_pci_write32(hw, PEX_UNC_ERR_STAT,
2044 0xffffffffUL); 2088 0xffffffffUL);
2045 sky2_write32(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF); 2089 sky2_write32(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
2046 2090
@@ -2086,6 +2130,7 @@ static void sky2_phy_intr(struct sky2_hw *hw, unsigned port)
2086 2130
2087 hw->intr_mask &= ~(port == 0 ? Y2_IS_IRQ_PHY1 : Y2_IS_IRQ_PHY2); 2131 hw->intr_mask &= ~(port == 0 ? Y2_IS_IRQ_PHY1 : Y2_IS_IRQ_PHY2);
2088 sky2_write32(hw, B0_IMSK, hw->intr_mask); 2132 sky2_write32(hw, B0_IMSK, hw->intr_mask);
2133
2089 schedule_work(&sky2->phy_task); 2134 schedule_work(&sky2->phy_task);
2090} 2135}
2091 2136
@@ -2099,6 +2144,7 @@ static irqreturn_t sky2_intr(int irq, void *dev_id, struct pt_regs *regs)
2099 if (status == 0 || status == ~0) 2144 if (status == 0 || status == ~0)
2100 return IRQ_NONE; 2145 return IRQ_NONE;
2101 2146
2147 spin_lock(&hw->hw_lock);
2102 if (status & Y2_IS_HW_ERR) 2148 if (status & Y2_IS_HW_ERR)
2103 sky2_hw_intr(hw); 2149 sky2_hw_intr(hw);
2104 2150
@@ -2127,7 +2173,7 @@ static irqreturn_t sky2_intr(int irq, void *dev_id, struct pt_regs *regs)
2127 2173
2128 sky2_write32(hw, B0_Y2_SP_ICR, 2); 2174 sky2_write32(hw, B0_Y2_SP_ICR, 2);
2129 2175
2130 sky2_read32(hw, B0_IMSK); 2176 spin_unlock(&hw->hw_lock);
2131 2177
2132 return IRQ_HANDLED; 2178 return IRQ_HANDLED;
2133} 2179}
@@ -2170,7 +2216,7 @@ static int sky2_reset(struct sky2_hw *hw)
2170{ 2216{
2171 u16 status; 2217 u16 status;
2172 u8 t8, pmd_type; 2218 u8 t8, pmd_type;
2173 int i, err; 2219 int i;
2174 2220
2175 sky2_write8(hw, B0_CTST, CS_RST_CLR); 2221 sky2_write8(hw, B0_CTST, CS_RST_CLR);
2176 2222
@@ -2192,25 +2238,18 @@ static int sky2_reset(struct sky2_hw *hw)
2192 sky2_write8(hw, B0_CTST, CS_RST_CLR); 2238 sky2_write8(hw, B0_CTST, CS_RST_CLR);
2193 2239
2194 /* clear PCI errors, if any */ 2240 /* clear PCI errors, if any */
2195 err = pci_read_config_word(hw->pdev, PCI_STATUS, &status); 2241 status = sky2_pci_read16(hw, PCI_STATUS);
2196 if (err)
2197 goto pci_err;
2198 2242
2199 sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON); 2243 sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
2200 err = pci_write_config_word(hw->pdev, PCI_STATUS, 2244 sky2_pci_write16(hw, PCI_STATUS, status | PCI_STATUS_ERROR_BITS);
2201 status | PCI_STATUS_ERROR_BITS); 2245
2202 if (err)
2203 goto pci_err;
2204 2246
2205 sky2_write8(hw, B0_CTST, CS_MRST_CLR); 2247 sky2_write8(hw, B0_CTST, CS_MRST_CLR);
2206 2248
2207 /* clear any PEX errors */ 2249 /* clear any PEX errors */
2208 if (pci_find_capability(hw->pdev, PCI_CAP_ID_EXP)) { 2250 if (pci_find_capability(hw->pdev, PCI_CAP_ID_EXP))
2209 err = pci_write_config_dword(hw->pdev, PEX_UNC_ERR_STAT, 2251 sky2_pci_write32(hw, PEX_UNC_ERR_STAT, 0xffffffffUL);
2210 0xffffffffUL); 2252
2211 if (err)
2212 goto pci_err;
2213 }
2214 2253
2215 pmd_type = sky2_read8(hw, B2_PMD_TYP); 2254 pmd_type = sky2_read8(hw, B2_PMD_TYP);
2216 hw->copper = !(pmd_type == 'L' || pmd_type == 'S'); 2255 hw->copper = !(pmd_type == 'L' || pmd_type == 'S');
@@ -2309,8 +2348,7 @@ static int sky2_reset(struct sky2_hw *hw)
2309 sky2_write8(hw, STAT_FIFO_ISR_WM, 16); 2348 sky2_write8(hw, STAT_FIFO_ISR_WM, 16);
2310 2349
2311 sky2_write32(hw, STAT_TX_TIMER_INI, sky2_us2clk(hw, 1000)); 2350 sky2_write32(hw, STAT_TX_TIMER_INI, sky2_us2clk(hw, 1000));
2312 sky2_write32(hw, STAT_LEV_TIMER_INI, sky2_us2clk(hw, 100)); 2351 sky2_write32(hw, STAT_ISR_TIMER_INI, sky2_us2clk(hw, 7));
2313 sky2_write32(hw, STAT_ISR_TIMER_INI, sky2_us2clk(hw, 20));
2314 } 2352 }
2315 2353
2316 /* enable status unit */ 2354 /* enable status unit */
@@ -2321,14 +2359,6 @@ static int sky2_reset(struct sky2_hw *hw)
2321 sky2_write8(hw, STAT_ISR_TIMER_CTRL, TIM_START); 2359 sky2_write8(hw, STAT_ISR_TIMER_CTRL, TIM_START);
2322 2360
2323 return 0; 2361 return 0;
2324
2325pci_err:
2326 /* This is to catch a BIOS bug workaround where
2327 * mmconfig table doesn't have other buses.
2328 */
2329 printk(KERN_ERR PFX "%s: can't access PCI config space\n",
2330 pci_name(hw->pdev));
2331 return err;
2332} 2362}
2333 2363
2334static u32 sky2_supported_modes(const struct sky2_hw *hw) 2364static u32 sky2_supported_modes(const struct sky2_hw *hw)
@@ -2852,11 +2882,11 @@ static int sky2_set_coalesce(struct net_device *dev,
2852 (ecmd->rx_coalesce_usecs_irq < tmin || ecmd->rx_coalesce_usecs_irq > tmax)) 2882 (ecmd->rx_coalesce_usecs_irq < tmin || ecmd->rx_coalesce_usecs_irq > tmax))
2853 return -EINVAL; 2883 return -EINVAL;
2854 2884
2855 if (ecmd->tx_max_coalesced_frames > 0xffff) 2885 if (ecmd->tx_max_coalesced_frames >= TX_RING_SIZE-1)
2856 return -EINVAL; 2886 return -EINVAL;
2857 if (ecmd->rx_max_coalesced_frames > 0xff) 2887 if (ecmd->rx_max_coalesced_frames > RX_MAX_PENDING)
2858 return -EINVAL; 2888 return -EINVAL;
2859 if (ecmd->rx_max_coalesced_frames_irq > 0xff) 2889 if (ecmd->rx_max_coalesced_frames_irq >RX_MAX_PENDING)
2860 return -EINVAL; 2890 return -EINVAL;
2861 2891
2862 if (ecmd->tx_coalesce_usecs == 0) 2892 if (ecmd->tx_coalesce_usecs == 0)
@@ -3092,61 +3122,6 @@ static void __devinit sky2_show_addr(struct net_device *dev)
3092 dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5]); 3122 dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5]);
3093} 3123}
3094 3124
3095/* Handle software interrupt used during MSI test */
3096static irqreturn_t __devinit sky2_test_intr(int irq, void *dev_id,
3097 struct pt_regs *regs)
3098{
3099 struct sky2_hw *hw = dev_id;
3100 u32 status = sky2_read32(hw, B0_Y2_SP_ISRC2);
3101
3102 if (status == 0)
3103 return IRQ_NONE;
3104
3105 if (status & Y2_IS_IRQ_SW) {
3106 sky2_write8(hw, B0_CTST, CS_CL_SW_IRQ);
3107 hw->msi = 1;
3108 }
3109 sky2_write32(hw, B0_Y2_SP_ICR, 2);
3110
3111 sky2_read32(hw, B0_IMSK);
3112 return IRQ_HANDLED;
3113}
3114
3115/* Test interrupt path by forcing a a software IRQ */
3116static int __devinit sky2_test_msi(struct sky2_hw *hw)
3117{
3118 struct pci_dev *pdev = hw->pdev;
3119 int i, err;
3120
3121 sky2_write32(hw, B0_IMSK, Y2_IS_IRQ_SW);
3122
3123 err = request_irq(pdev->irq, sky2_test_intr, SA_SHIRQ, DRV_NAME, hw);
3124 if (err) {
3125 printk(KERN_ERR PFX "%s: cannot assign irq %d\n",
3126 pci_name(pdev), pdev->irq);
3127 return err;
3128 }
3129
3130 sky2_write8(hw, B0_CTST, CS_ST_SW_IRQ);
3131 wmb();
3132
3133 for (i = 0; i < 10; i++) {
3134 barrier();
3135 if (hw->msi)
3136 goto found;
3137 mdelay(1);
3138 }
3139
3140 err = -EOPNOTSUPP;
3141 sky2_write8(hw, B0_CTST, CS_CL_SW_IRQ);
3142 found:
3143 sky2_write32(hw, B0_IMSK, 0);
3144
3145 free_irq(pdev->irq, hw);
3146
3147 return err;
3148}
3149
3150static int __devinit sky2_probe(struct pci_dev *pdev, 3125static int __devinit sky2_probe(struct pci_dev *pdev,
3151 const struct pci_device_id *ent) 3126 const struct pci_device_id *ent)
3152{ 3127{
@@ -3198,17 +3173,6 @@ static int __devinit sky2_probe(struct pci_dev *pdev,
3198 } 3173 }
3199 } 3174 }
3200 3175
3201#ifdef __BIG_ENDIAN
3202 /* byte swap descriptors in hardware */
3203 {
3204 u32 reg;
3205
3206 pci_read_config_dword(pdev, PCI_DEV_REG2, &reg);
3207 reg |= PCI_REV_DESC;
3208 pci_write_config_dword(pdev, PCI_DEV_REG2, reg);
3209 }
3210#endif
3211
3212 err = -ENOMEM; 3176 err = -ENOMEM;
3213 hw = kzalloc(sizeof(*hw), GFP_KERNEL); 3177 hw = kzalloc(sizeof(*hw), GFP_KERNEL);
3214 if (!hw) { 3178 if (!hw) {
@@ -3226,6 +3190,18 @@ static int __devinit sky2_probe(struct pci_dev *pdev,
3226 goto err_out_free_hw; 3190 goto err_out_free_hw;
3227 } 3191 }
3228 hw->pm_cap = pm_cap; 3192 hw->pm_cap = pm_cap;
3193 spin_lock_init(&hw->hw_lock);
3194
3195#ifdef __BIG_ENDIAN
3196 /* byte swap descriptors in hardware */
3197 {
3198 u32 reg;
3199
3200 reg = sky2_pci_read32(hw, PCI_DEV_REG2);
3201 reg |= PCI_REV_DESC;
3202 sky2_pci_write32(hw, PCI_DEV_REG2, reg);
3203 }
3204#endif
3229 3205
3230 /* ring for status responses */ 3206 /* ring for status responses */
3231 hw->st_le = pci_alloc_consistent(hw->pdev, STATUS_LE_BYTES, 3207 hw->st_le = pci_alloc_consistent(hw->pdev, STATUS_LE_BYTES,
@@ -3267,20 +3243,6 @@ static int __devinit sky2_probe(struct pci_dev *pdev,
3267 } 3243 }
3268 } 3244 }
3269 3245
3270 if (!disable_msi && pci_enable_msi(pdev) == 0) {
3271 err = sky2_test_msi(hw);
3272 if (err == -EOPNOTSUPP) {
3273 /* MSI test failed, go back to INTx mode */
3274 printk(KERN_WARNING PFX "%s: No interrupt was generated using MSI, "
3275 "switching to INTx mode. Please report this failure to "
3276 "the PCI maintainer and include system chipset information.\n",
3277 pci_name(pdev));
3278 pci_disable_msi(pdev);
3279 }
3280 else if (err)
3281 goto err_out_unregister;
3282 }
3283
3284 err = request_irq(pdev->irq, sky2_intr, SA_SHIRQ | SA_SAMPLE_RANDOM, 3246 err = request_irq(pdev->irq, sky2_intr, SA_SHIRQ | SA_SAMPLE_RANDOM,
3285 DRV_NAME, hw); 3247 DRV_NAME, hw);
3286 if (err) { 3248 if (err) {
@@ -3297,8 +3259,6 @@ static int __devinit sky2_probe(struct pci_dev *pdev,
3297 return 0; 3259 return 0;
3298 3260
3299err_out_unregister: 3261err_out_unregister:
3300 if (hw->msi)
3301 pci_disable_msi(pdev);
3302 if (dev1) { 3262 if (dev1) {
3303 unregister_netdev(dev1); 3263 unregister_netdev(dev1);
3304 free_netdev(dev1); 3264 free_netdev(dev1);
@@ -3341,8 +3301,6 @@ static void __devexit sky2_remove(struct pci_dev *pdev)
3341 sky2_read8(hw, B0_CTST); 3301 sky2_read8(hw, B0_CTST);
3342 3302
3343 free_irq(pdev->irq, hw); 3303 free_irq(pdev->irq, hw);
3344 if (hw->msi)
3345 pci_disable_msi(pdev);
3346 pci_free_consistent(pdev, STATUS_LE_BYTES, hw->st_le, hw->st_dma); 3304 pci_free_consistent(pdev, STATUS_LE_BYTES, hw->st_le, hw->st_dma);
3347 pci_release_regions(pdev); 3305 pci_release_regions(pdev);
3348 pci_disable_device(pdev); 3306 pci_disable_device(pdev);
diff --git a/drivers/net/sky2.h b/drivers/net/sky2.h
index fd12c289a238..dce955c76f3c 100644
--- a/drivers/net/sky2.h
+++ b/drivers/net/sky2.h
@@ -5,14 +5,22 @@
5#define _SKY2_H 5#define _SKY2_H
6 6
7/* PCI config registers */ 7/* PCI config registers */
8#define PCI_DEV_REG1 0x40 8enum {
9#define PCI_DEV_REG2 0x44 9 PCI_DEV_REG1 = 0x40,
10#define PCI_DEV_STATUS 0x7c 10 PCI_DEV_REG2 = 0x44,
11#define PCI_OS_PCI_X (1<<26) 11 PCI_DEV_STATUS = 0x7c,
12 PCI_DEV_REG3 = 0x80,
13 PCI_DEV_REG4 = 0x84,
14 PCI_DEV_REG5 = 0x88,
15};
12 16
13#define PEX_LNK_STAT 0xf2 17enum {
14#define PEX_UNC_ERR_STAT 0x104 18 PEX_DEV_CAP = 0xe4,
15#define PEX_DEV_CTRL 0xe8 19 PEX_DEV_CTRL = 0xe8,
20 PEX_DEV_STA = 0xea,
21 PEX_LNK_STAT = 0xf2,
22 PEX_UNC_ERR_STAT= 0x104,
23};
16 24
17/* Yukon-2 */ 25/* Yukon-2 */
18enum pci_dev_reg_1 { 26enum pci_dev_reg_1 {
@@ -37,6 +45,25 @@ enum pci_dev_reg_2 {
37 PCI_USEDATA64 = 1<<0, /* Use 64Bit Data bus ext */ 45 PCI_USEDATA64 = 1<<0, /* Use 64Bit Data bus ext */
38}; 46};
39 47
48/* PCI_OUR_REG_4 32 bit Our Register 4 (Yukon-ECU only) */
49enum pci_dev_reg_4 {
50 /* (Link Training & Status State Machine) */
51 P_TIMER_VALUE_MSK = 0xffL<<16, /* Bit 23..16: Timer Value Mask */
52 /* (Active State Power Management) */
53 P_FORCE_ASPM_REQUEST = 1<<15, /* Force ASPM Request (A1 only) */
54 P_ASPM_GPHY_LINK_DOWN = 1<<14, /* GPHY Link Down (A1 only) */
55 P_ASPM_INT_FIFO_EMPTY = 1<<13, /* Internal FIFO Empty (A1 only) */
56 P_ASPM_CLKRUN_REQUEST = 1<<12, /* CLKRUN Request (A1 only) */
57
58 P_ASPM_FORCE_CLKREQ_ENA = 1<<4, /* Force CLKREQ Enable (A1b only) */
59 P_ASPM_CLKREQ_PAD_CTL = 1<<3, /* CLKREQ PAD Control (A1 only) */
60 P_ASPM_A1_MODE_SELECT = 1<<2, /* A1 Mode Select (A1 only) */
61 P_CLK_GATE_PEX_UNIT_ENA = 1<<1, /* Enable Gate PEX Unit Clock */
62 P_CLK_GATE_ROOT_COR_ENA = 1<<0, /* Enable Gate Root Core Clock */
63 P_ASPM_CONTROL_MSK = P_FORCE_ASPM_REQUEST | P_ASPM_GPHY_LINK_DOWN
64 | P_ASPM_CLKRUN_REQUEST | P_ASPM_INT_FIFO_EMPTY,
65};
66
40 67
41#define PCI_STATUS_ERROR_BITS (PCI_STATUS_DETECTED_PARITY | \ 68#define PCI_STATUS_ERROR_BITS (PCI_STATUS_DETECTED_PARITY | \
42 PCI_STATUS_SIG_SYSTEM_ERROR | \ 69 PCI_STATUS_SIG_SYSTEM_ERROR | \
@@ -507,6 +534,16 @@ enum {
507}; 534};
508#define Q_ADDR(reg, offs) (B8_Q_REGS + (reg) + (offs)) 535#define Q_ADDR(reg, offs) (B8_Q_REGS + (reg) + (offs))
509 536
537/* Q_F 32 bit Flag Register */
538enum {
539 F_ALM_FULL = 1<<27, /* Rx FIFO: almost full */
540 F_EMPTY = 1<<27, /* Tx FIFO: empty flag */
541 F_FIFO_EOF = 1<<26, /* Tag (EOF Flag) bit in FIFO */
542 F_WM_REACHED = 1<<25, /* Watermark reached */
543 F_M_RX_RAM_DIS = 1<<24, /* MAC Rx RAM Read Port disable */
544 F_FIFO_LEVEL = 0x1fL<<16, /* Bit 23..16: # of Qwords in FIFO */
545 F_WATER_MARK = 0x0007ffL, /* Bit 10.. 0: Watermark */
546};
510 547
511/* Queue Prefetch Unit Offsets, use Y2_QADDR() to address (Yukon-2 only)*/ 548/* Queue Prefetch Unit Offsets, use Y2_QADDR() to address (Yukon-2 only)*/
512enum { 549enum {
@@ -909,10 +946,12 @@ enum {
909 PHY_BCOM_ID1_C0 = 0x6044, 946 PHY_BCOM_ID1_C0 = 0x6044,
910 PHY_BCOM_ID1_C5 = 0x6047, 947 PHY_BCOM_ID1_C5 = 0x6047,
911 948
912 PHY_MARV_ID1_B0 = 0x0C23, /* Yukon (PHY 88E1011) */ 949 PHY_MARV_ID1_B0 = 0x0C23, /* Yukon (PHY 88E1011) */
913 PHY_MARV_ID1_B2 = 0x0C25, /* Yukon-Plus (PHY 88E1011) */ 950 PHY_MARV_ID1_B2 = 0x0C25, /* Yukon-Plus (PHY 88E1011) */
914 PHY_MARV_ID1_C2 = 0x0CC2, /* Yukon-EC (PHY 88E1111) */ 951 PHY_MARV_ID1_C2 = 0x0CC2, /* Yukon-EC (PHY 88E1111) */
915 PHY_MARV_ID1_Y2 = 0x0C91, /* Yukon-2 (PHY 88E1112) */ 952 PHY_MARV_ID1_Y2 = 0x0C91, /* Yukon-2 (PHY 88E1112) */
953 PHY_MARV_ID1_FE = 0x0C83, /* Yukon-FE (PHY 88E3082 Rev.A1) */
954 PHY_MARV_ID1_ECU= 0x0CB0, /* Yukon-ECU (PHY 88E1149 Rev.B2?) */
916}; 955};
917 956
918/* Advertisement register bits */ 957/* Advertisement register bits */
@@ -1837,11 +1876,11 @@ struct sky2_port {
1837struct sky2_hw { 1876struct sky2_hw {
1838 void __iomem *regs; 1877 void __iomem *regs;
1839 struct pci_dev *pdev; 1878 struct pci_dev *pdev;
1840 u32 intr_mask;
1841 struct net_device *dev[2]; 1879 struct net_device *dev[2];
1880 spinlock_t hw_lock;
1881 u32 intr_mask;
1842 1882
1843 int pm_cap; 1883 int pm_cap;
1844 int msi;
1845 u8 chip_id; 1884 u8 chip_id;
1846 u8 chip_rev; 1885 u8 chip_rev;
1847 u8 copper; 1886 u8 copper;
@@ -1912,4 +1951,25 @@ static inline void gma_set_addr(struct sky2_hw *hw, unsigned port, unsigned reg,
1912 gma_write16(hw, port, reg+4,(u16) addr[2] | ((u16) addr[3] << 8)); 1951 gma_write16(hw, port, reg+4,(u16) addr[2] | ((u16) addr[3] << 8));
1913 gma_write16(hw, port, reg+8,(u16) addr[4] | ((u16) addr[5] << 8)); 1952 gma_write16(hw, port, reg+8,(u16) addr[4] | ((u16) addr[5] << 8));
1914} 1953}
1954
1955/* PCI config space access */
1956static inline u32 sky2_pci_read32(const struct sky2_hw *hw, unsigned reg)
1957{
1958 return sky2_read32(hw, Y2_CFG_SPC + reg);
1959}
1960
1961static inline u16 sky2_pci_read16(const struct sky2_hw *hw, unsigned reg)
1962{
1963 return sky2_read16(hw, Y2_CFG_SPC + reg);
1964}
1965
1966static inline void sky2_pci_write32(struct sky2_hw *hw, unsigned reg, u32 val)
1967{
1968 sky2_write32(hw, Y2_CFG_SPC + reg, val);
1969}
1970
1971static inline void sky2_pci_write16(struct sky2_hw *hw, unsigned reg, u16 val)
1972{
1973 sky2_write16(hw, Y2_CFG_SPC + reg, val);
1974}
1915#endif 1975#endif
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index e7dc653d5bd6..e8e92c853e89 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -9408,6 +9408,15 @@ static int __devinit tg3_is_sun_570X(struct tg3 *tp)
9408 return 0; 9408 return 0;
9409 if (venid == PCI_VENDOR_ID_SUN) 9409 if (venid == PCI_VENDOR_ID_SUN)
9410 return 1; 9410 return 1;
9411
9412 /* TG3 chips onboard the SunBlade-2500 don't have the
9413 * subsystem-vendor-id set to PCI_VENDOR_ID_SUN but they
9414 * are distinguishable from non-Sun variants by being
9415 * named "network" by the firmware. Non-Sun cards will
9416 * show up as being named "ethernet".
9417 */
9418 if (!strcmp(pcp->prom_name, "network"))
9419 return 1;
9411 } 9420 }
9412 return 0; 9421 return 0;
9413} 9422}
diff --git a/drivers/net/tlan.c b/drivers/net/tlan.c
index c2506b56a186..12076f8f942c 100644
--- a/drivers/net/tlan.c
+++ b/drivers/net/tlan.c
@@ -536,6 +536,7 @@ static int __devinit TLan_probe1(struct pci_dev *pdev,
536 u16 device_id; 536 u16 device_id;
537 int reg, rc = -ENODEV; 537 int reg, rc = -ENODEV;
538 538
539#ifdef CONFIG_PCI
539 if (pdev) { 540 if (pdev) {
540 rc = pci_enable_device(pdev); 541 rc = pci_enable_device(pdev);
541 if (rc) 542 if (rc)
@@ -547,6 +548,7 @@ static int __devinit TLan_probe1(struct pci_dev *pdev,
547 goto err_out; 548 goto err_out;
548 } 549 }
549 } 550 }
551#endif /* CONFIG_PCI */
550 552
551 dev = alloc_etherdev(sizeof(TLanPrivateInfo)); 553 dev = alloc_etherdev(sizeof(TLanPrivateInfo));
552 if (dev == NULL) { 554 if (dev == NULL) {
diff --git a/drivers/net/via-velocity.c b/drivers/net/via-velocity.c
index c2d5907dc8e0..ed1f837c8fda 100644
--- a/drivers/net/via-velocity.c
+++ b/drivers/net/via-velocity.c
@@ -1106,6 +1106,9 @@ static void velocity_free_rd_ring(struct velocity_info *vptr)
1106 1106
1107 for (i = 0; i < vptr->options.numrx; i++) { 1107 for (i = 0; i < vptr->options.numrx; i++) {
1108 struct velocity_rd_info *rd_info = &(vptr->rd_info[i]); 1108 struct velocity_rd_info *rd_info = &(vptr->rd_info[i]);
1109 struct rx_desc *rd = vptr->rd_ring + i;
1110
1111 memset(rd, 0, sizeof(*rd));
1109 1112
1110 if (!rd_info->skb) 1113 if (!rd_info->skb)
1111 continue; 1114 continue;
diff --git a/drivers/net/wireless/hostap/hostap_cs.c b/drivers/net/wireless/hostap/hostap_cs.c
index 8bc0b528548f..f8f4503475f9 100644
--- a/drivers/net/wireless/hostap/hostap_cs.c
+++ b/drivers/net/wireless/hostap/hostap_cs.c
@@ -877,7 +877,6 @@ static struct pcmcia_device_id hostap_cs_ids[] = {
877 PCMCIA_DEVICE_MANF_CARD(0x0101, 0x0777), 877 PCMCIA_DEVICE_MANF_CARD(0x0101, 0x0777),
878 PCMCIA_DEVICE_MANF_CARD(0x0126, 0x8000), 878 PCMCIA_DEVICE_MANF_CARD(0x0126, 0x8000),
879 PCMCIA_DEVICE_MANF_CARD(0x0138, 0x0002), 879 PCMCIA_DEVICE_MANF_CARD(0x0138, 0x0002),
880 PCMCIA_DEVICE_MANF_CARD(0x0156, 0x0002),
881 PCMCIA_DEVICE_MANF_CARD(0x0250, 0x0002), 880 PCMCIA_DEVICE_MANF_CARD(0x0250, 0x0002),
882 PCMCIA_DEVICE_MANF_CARD(0x026f, 0x030b), 881 PCMCIA_DEVICE_MANF_CARD(0x026f, 0x030b),
883 PCMCIA_DEVICE_MANF_CARD(0x0274, 0x1612), 882 PCMCIA_DEVICE_MANF_CARD(0x0274, 0x1612),
@@ -891,6 +890,10 @@ static struct pcmcia_device_id hostap_cs_ids[] = {
891 PCMCIA_DEVICE_MANF_CARD(0xd601, 0x0002), 890 PCMCIA_DEVICE_MANF_CARD(0xd601, 0x0002),
892 PCMCIA_DEVICE_MANF_CARD(0xd601, 0x0005), 891 PCMCIA_DEVICE_MANF_CARD(0xd601, 0x0005),
893 PCMCIA_DEVICE_MANF_CARD(0xd601, 0x0010), 892 PCMCIA_DEVICE_MANF_CARD(0xd601, 0x0010),
893 PCMCIA_DEVICE_MANF_CARD_PROD_ID1(0x0156, 0x0002, "INTERSIL",
894 0x74c5e40d),
895 PCMCIA_DEVICE_MANF_CARD_PROD_ID1(0x0156, 0x0002, "Intersil",
896 0x4b801a17),
894 PCMCIA_MFC_DEVICE_PROD_ID12(0, "SanDisk", "ConnectPlus", 897 PCMCIA_MFC_DEVICE_PROD_ID12(0, "SanDisk", "ConnectPlus",
895 0x7a954bd9, 0x74be00c6), 898 0x7a954bd9, 0x74be00c6),
896 PCMCIA_DEVICE_PROD_ID1234( 899 PCMCIA_DEVICE_PROD_ID1234(