Diffstat (limited to 'drivers/net')
-rw-r--r--  drivers/net/Kconfig                     5
-rw-r--r--  drivers/net/bonding/bond_main.c         1
-rw-r--r--  drivers/net/ifb.c                       2
-rw-r--r--  drivers/net/irda/irda-usb.c            90
-rw-r--r--  drivers/net/irda/irda-usb.h             7
-rw-r--r--  drivers/net/r8169.c                   189
-rw-r--r--  drivers/net/sis190.c                    4
-rw-r--r--  drivers/net/sis900.c                    4
-rw-r--r--  drivers/net/skge.c                     85
-rw-r--r--  drivers/net/skge.h                      1
-rw-r--r--  drivers/net/sky2.c                    204
-rw-r--r--  drivers/net/sky2.h                     83
-rw-r--r--  drivers/net/tg3.c                       9
-rw-r--r--  drivers/net/tlan.c                      2
-rw-r--r--  drivers/net/tokenring/smctr.h           2
-rw-r--r--  drivers/net/wireless/ipw2200.c          6
-rw-r--r--  drivers/net/wireless/wavelan_cs.c      16
17 files changed, 513 insertions, 197 deletions
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 2610cedd55c7..840bfed312f6 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -2020,8 +2020,8 @@ config SIS190
2020 will be called sis190. This is recommended. 2020 will be called sis190. This is recommended.
2021 2021
2022config SKGE 2022config SKGE
2023 tristate "New SysKonnect GigaEthernet support (EXPERIMENTAL)" 2023 tristate "New SysKonnect GigaEthernet support"
2024 depends on PCI && EXPERIMENTAL 2024 depends on PCI
2025 select CRC32 2025 select CRC32
2026 ---help--- 2026 ---help---
2027 This driver support the Marvell Yukon or SysKonnect SK-98xx/SK-95xx 2027 This driver support the Marvell Yukon or SysKonnect SK-98xx/SK-95xx
@@ -2082,7 +2082,6 @@ config SK98LIN
2082 - Allied Telesyn AT-2971SX Gigabit Ethernet Adapter 2082 - Allied Telesyn AT-2971SX Gigabit Ethernet Adapter
2083 - Allied Telesyn AT-2971T Gigabit Ethernet Adapter 2083 - Allied Telesyn AT-2971T Gigabit Ethernet Adapter
2084 - Belkin Gigabit Desktop Card 10/100/1000Base-T Adapter, Copper RJ-45 2084 - Belkin Gigabit Desktop Card 10/100/1000Base-T Adapter, Copper RJ-45
2085 - DGE-530T Gigabit Ethernet Adapter
2086 - EG1032 v2 Instant Gigabit Network Adapter 2085 - EG1032 v2 Instant Gigabit Network Adapter
2087 - EG1064 v2 Instant Gigabit Network Adapter 2086 - EG1064 v2 Instant Gigabit Network Adapter
2088 - Marvell 88E8001 Gigabit LOM Ethernet Adapter (Abit) 2087 - Marvell 88E8001 Gigabit LOM Ethernet Adapter (Abit)
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index e0f51afec778..bcf9f17daf0d 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -1581,6 +1581,7 @@ int bond_release(struct net_device *bond_dev, struct net_device *slave_dev)
1581 printk(KERN_INFO DRV_NAME 1581 printk(KERN_INFO DRV_NAME
1582 ": %s: %s not enslaved\n", 1582 ": %s: %s not enslaved\n",
1583 bond_dev->name, slave_dev->name); 1583 bond_dev->name, slave_dev->name);
1584 write_unlock_bh(&bond->lock);
1584 return -EINVAL; 1585 return -EINVAL;
1585 } 1586 }
1586 1587
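The bonding hunk balances the locking on the early-return path: bond_release() has already taken bond->lock for writing by this point, so returning -EINVAL for a device that is not actually enslaved must drop the lock first, which is what the added write_unlock_bh() provides. A minimal sketch of the pattern, with hypothetical struct names rather than the real bonding code:

struct my_bond {
	rwlock_t lock;			/* hypothetical; bonding uses a similar rwlock */
};

static int my_release(struct my_bond *bond, int enslaved)
{
	write_lock_bh(&bond->lock);

	if (!enslaved) {
		/* The early return must drop the lock too; this is what the
		 * added write_unlock_bh() line above supplies. */
		write_unlock_bh(&bond->lock);
		return -EINVAL;
	}

	/* ... detach the slave while still holding the lock ... */

	write_unlock_bh(&bond->lock);
	return 0;
}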
diff --git a/drivers/net/ifb.c b/drivers/net/ifb.c
index 1b699259b4ec..31fb2d75dc44 100644
--- a/drivers/net/ifb.c
+++ b/drivers/net/ifb.c
@@ -57,7 +57,7 @@ struct ifb_private {
57 struct sk_buff_head tq; 57 struct sk_buff_head tq;
58}; 58};
59 59
60static int numifbs = 1; 60static int numifbs = 2;
61 61
62static void ri_tasklet(unsigned long dev); 62static void ri_tasklet(unsigned long dev);
63static int ifb_xmit(struct sk_buff *skb, struct net_device *dev); 63static int ifb_xmit(struct sk_buff *skb, struct net_device *dev);
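The ifb change only bumps the default device count from one to two. numifbs is exposed as a module parameter, so the value can still be chosen at load time (for example "modprobe ifb numifbs=4"). A hedged sketch of how such a parameter is typically declared; the exact permission bits and description string in ifb.c may differ:

/* Sketch: default device count exposed as a module parameter. */
static int numifbs = 2;
module_param(numifbs, int, 0);
MODULE_PARM_DESC(numifbs, "Number of ifb devices created at load time");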
diff --git a/drivers/net/irda/irda-usb.c b/drivers/net/irda/irda-usb.c
index fa176ffb4ad5..8936058a3cce 100644
--- a/drivers/net/irda/irda-usb.c
+++ b/drivers/net/irda/irda-usb.c
@@ -108,6 +108,7 @@ static void irda_usb_close(struct irda_usb_cb *self);
108static void speed_bulk_callback(struct urb *urb, struct pt_regs *regs); 108static void speed_bulk_callback(struct urb *urb, struct pt_regs *regs);
109static void write_bulk_callback(struct urb *urb, struct pt_regs *regs); 109static void write_bulk_callback(struct urb *urb, struct pt_regs *regs);
110static void irda_usb_receive(struct urb *urb, struct pt_regs *regs); 110static void irda_usb_receive(struct urb *urb, struct pt_regs *regs);
111static void irda_usb_rx_defer_expired(unsigned long data);
111static int irda_usb_net_open(struct net_device *dev); 112static int irda_usb_net_open(struct net_device *dev);
112static int irda_usb_net_close(struct net_device *dev); 113static int irda_usb_net_close(struct net_device *dev);
113static int irda_usb_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); 114static int irda_usb_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
@@ -677,6 +678,12 @@ static void irda_usb_net_timeout(struct net_device *netdev)
677 * on the interrupt pipe and hang the Rx URB only when an interrupt is 678 * on the interrupt pipe and hang the Rx URB only when an interrupt is
678 * received. 679 * received.
679 * Jean II 680 * Jean II
681 *
682 * Note : don't read the above as what we are currently doing, but as
683 * something we could do with KC dongle. Also don't forget that the
684 * interrupt pipe is not part of the original standard, so this would
685 * need to be optional...
686 * Jean II
680 */ 687 */
681 688
682/*------------------------------------------------------------------*/ 689/*------------------------------------------------------------------*/
@@ -704,10 +711,8 @@ static void irda_usb_submit(struct irda_usb_cb *self, struct sk_buff *skb, struc
704 /* Reinitialize URB */ 711 /* Reinitialize URB */
705 usb_fill_bulk_urb(urb, self->usbdev, 712 usb_fill_bulk_urb(urb, self->usbdev,
706 usb_rcvbulkpipe(self->usbdev, self->bulk_in_ep), 713 usb_rcvbulkpipe(self->usbdev, self->bulk_in_ep),
707 skb->data, skb->truesize, 714 skb->data, IRDA_SKB_MAX_MTU,
708 irda_usb_receive, skb); 715 irda_usb_receive, skb);
709 /* Note : unlink *must* be synchronous because of the code in
710 * irda_usb_net_close() -> free the skb - Jean II */
711 urb->status = 0; 716 urb->status = 0;
712 717
713 /* Can be called from irda_usb_receive (irq handler) -> GFP_ATOMIC */ 718 /* Can be called from irda_usb_receive (irq handler) -> GFP_ATOMIC */
@@ -734,6 +739,7 @@ static void irda_usb_receive(struct urb *urb, struct pt_regs *regs)
734 struct irda_skb_cb *cb; 739 struct irda_skb_cb *cb;
735 struct sk_buff *newskb; 740 struct sk_buff *newskb;
736 struct sk_buff *dataskb; 741 struct sk_buff *dataskb;
742 struct urb *next_urb;
737 int docopy; 743 int docopy;
738 744
739 IRDA_DEBUG(2, "%s(), len=%d\n", __FUNCTION__, urb->actual_length); 745 IRDA_DEBUG(2, "%s(), len=%d\n", __FUNCTION__, urb->actual_length);
@@ -755,20 +761,37 @@ static void irda_usb_receive(struct urb *urb, struct pt_regs *regs)
755 if (urb->status != 0) { 761 if (urb->status != 0) {
756 switch (urb->status) { 762 switch (urb->status) {
757 case -EILSEQ: 763 case -EILSEQ:
758 self->stats.rx_errors++;
759 self->stats.rx_crc_errors++; 764 self->stats.rx_crc_errors++;
760 break; 765 /* Also precursor to a hot-unplug on UHCI. */
766 /* Fallthrough... */
761 case -ECONNRESET: /* -104 */ 767 case -ECONNRESET: /* -104 */
762 IRDA_DEBUG(0, "%s(), Connection Reset (-104), transfer_flags 0x%04X \n", __FUNCTION__, urb->transfer_flags); 768 /* Random error, if I remember correctly */
763 /* uhci_cleanup_unlink() is going to kill the Rx 769 /* uhci_cleanup_unlink() is going to kill the Rx
764 * URB just after we return. No problem, at this 770 * URB just after we return. No problem, at this
765 * point the URB will be idle ;-) - Jean II */ 771 * point the URB will be idle ;-) - Jean II */
766 break; 772 case -ESHUTDOWN: /* -108 */
773 /* That's usually a hot-unplug. Submit will fail... */
774 case -ETIMEDOUT: /* -110 */
775 /* Usually precursor to a hot-unplug on OHCI. */
767 default: 776 default:
768 IRDA_DEBUG(0, "%s(), RX status %d,transfer_flags 0x%04X \n", __FUNCTION__, urb->status, urb->transfer_flags); 777 self->stats.rx_errors++;
778 IRDA_DEBUG(0, "%s(), RX status %d, transfer_flags 0x%04X \n", __FUNCTION__, urb->status, urb->transfer_flags);
769 break; 779 break;
770 } 780 }
771 goto done; 781 /* If we received an error, we don't want to resubmit the
782 * Rx URB straight away but to give the USB layer a little
783 * bit of breathing room.
784 * We are in the USB thread context, therefore there is a
785 * danger of recursion (new URB we submit fails, we come
786 * back here).
787 * With recent USB stack (2.6.15+), I'm seeing that on
788 * hot unplug of the dongle...
789 * Lowest effective timer is 10ms...
790 * Jean II */
791 self->rx_defer_timer.function = &irda_usb_rx_defer_expired;
792 self->rx_defer_timer.data = (unsigned long) urb;
793 mod_timer(&self->rx_defer_timer, jiffies + (10 * HZ / 1000));
794 return;
772 } 795 }
773 796
774 /* Check for empty frames */ 797 /* Check for empty frames */
@@ -845,13 +868,45 @@ done:
845 * idle slot.... 868 * idle slot....
846 * Jean II */ 869 * Jean II */
847 /* Note : with this scheme, we could submit the idle URB before 870 /* Note : with this scheme, we could submit the idle URB before
848 * processing the Rx URB. Another time... Jean II */ 871 * processing the Rx URB. I don't think it would buy us anything as
872 * we are running in the USB thread context. Jean II */
873 next_urb = self->idle_rx_urb;
849 874
850 /* Submit the idle URB to replace the URB we've just received */
851 irda_usb_submit(self, skb, self->idle_rx_urb);
852 /* Recycle Rx URB : Now, the idle URB is the present one */ 875 /* Recycle Rx URB : Now, the idle URB is the present one */
853 urb->context = NULL; 876 urb->context = NULL;
854 self->idle_rx_urb = urb; 877 self->idle_rx_urb = urb;
878
879 /* Submit the idle URB to replace the URB we've just received.
880 * Do it last to avoid race conditions... Jean II */
881 irda_usb_submit(self, skb, next_urb);
882}
883
884/*------------------------------------------------------------------*/
885/*
886 * In case of errors, we want the USB layer to have time to recover.
887 * Now, it is time to resubmit our Rx URB...
888 */
889static void irda_usb_rx_defer_expired(unsigned long data)
890{
891 struct urb *urb = (struct urb *) data;
892 struct sk_buff *skb = (struct sk_buff *) urb->context;
893 struct irda_usb_cb *self;
894 struct irda_skb_cb *cb;
895 struct urb *next_urb;
896
897 IRDA_DEBUG(2, "%s()\n", __FUNCTION__);
898
899 /* Find ourselves */
900 cb = (struct irda_skb_cb *) skb->cb;
901 IRDA_ASSERT(cb != NULL, return;);
902 self = (struct irda_usb_cb *) cb->context;
903 IRDA_ASSERT(self != NULL, return;);
904
905 /* Same stuff as when Rx is done, see above... */
906 next_urb = self->idle_rx_urb;
907 urb->context = NULL;
908 self->idle_rx_urb = urb;
909 irda_usb_submit(self, skb, next_urb);
855} 910}
856 911
857/*------------------------------------------------------------------*/ 912/*------------------------------------------------------------------*/
@@ -990,6 +1045,9 @@ static int irda_usb_net_close(struct net_device *netdev)
990 /* Stop network Tx queue */ 1045 /* Stop network Tx queue */
991 netif_stop_queue(netdev); 1046 netif_stop_queue(netdev);
992 1047
1048 /* Kill deferred Rx URB */
1049 del_timer(&self->rx_defer_timer);
1050
993 /* Deallocate all the Rx path buffers (URBs and skb) */ 1051 /* Deallocate all the Rx path buffers (URBs and skb) */
994 for (i = 0; i < IU_MAX_RX_URBS; i++) { 1052 for (i = 0; i < IU_MAX_RX_URBS; i++) {
995 struct urb *urb = self->rx_urb[i]; 1053 struct urb *urb = self->rx_urb[i];
@@ -1365,6 +1423,7 @@ static int irda_usb_probe(struct usb_interface *intf,
1365 self = net->priv; 1423 self = net->priv;
1366 self->netdev = net; 1424 self->netdev = net;
1367 spin_lock_init(&self->lock); 1425 spin_lock_init(&self->lock);
1426 init_timer(&self->rx_defer_timer);
1368 1427
1369 /* Create all of the needed urbs */ 1428 /* Create all of the needed urbs */
1370 for (i = 0; i < IU_MAX_RX_URBS; i++) { 1429 for (i = 0; i < IU_MAX_RX_URBS; i++) {
@@ -1498,6 +1557,9 @@ static void irda_usb_disconnect(struct usb_interface *intf)
1498 * This will stop/desactivate the Tx path. - Jean II */ 1557 * This will stop/desactivate the Tx path. - Jean II */
1499 self->present = 0; 1558 self->present = 0;
1500 1559
1560 /* Kill deferred Rx URB */
1561 del_timer(&self->rx_defer_timer);
1562
1501 /* We need to have irq enabled to unlink the URBs. That's OK, 1563 /* We need to have irq enabled to unlink the URBs. That's OK,
1502 * at this point the Tx path is gone - Jean II */ 1564 * at this point the Tx path is gone - Jean II */
1503 spin_unlock_irqrestore(&self->lock, flags); 1565 spin_unlock_irqrestore(&self->lock, flags);
@@ -1507,11 +1569,11 @@ static void irda_usb_disconnect(struct usb_interface *intf)
1507 /* Accept no more transmissions */ 1569 /* Accept no more transmissions */
1508 /*netif_device_detach(self->netdev);*/ 1570 /*netif_device_detach(self->netdev);*/
1509 netif_stop_queue(self->netdev); 1571 netif_stop_queue(self->netdev);
1510 /* Stop all the receive URBs */ 1572 /* Stop all the receive URBs. Must be synchronous. */
1511 for (i = 0; i < IU_MAX_RX_URBS; i++) 1573 for (i = 0; i < IU_MAX_RX_URBS; i++)
1512 usb_kill_urb(self->rx_urb[i]); 1574 usb_kill_urb(self->rx_urb[i]);
1513 /* Cancel Tx and speed URB. 1575 /* Cancel Tx and speed URB.
1514 * Toggle flags to make sure it's synchronous. */ 1576 * Make sure it's synchronous to avoid races. */
1515 usb_kill_urb(self->tx_urb); 1577 usb_kill_urb(self->tx_urb);
1516 usb_kill_urb(self->speed_urb); 1578 usb_kill_urb(self->speed_urb);
1517 } 1579 }
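The substance of the irda-usb change: on an Rx URB error the completion handler no longer resubmits the URB directly (which risks recursion when the resubmit itself fails straight away, as happens on hot-unplug) but arms rx_defer_timer to retry roughly 10 ms later; the timer is initialised in probe and killed in net_close() and disconnect(). A stripped-down sketch of that defer-and-retry pattern, using hypothetical names and the same era of timer/URB APIs as the driver:

struct my_dev {
	struct timer_list rx_defer_timer;	/* init_timer()'d at probe time */
};

static void my_rx_defer_expired(unsigned long data)
{
	struct urb *urb = (struct urb *)data;

	/* Timer (softirq) context: resubmit atomically. */
	usb_submit_urb(urb, GFP_ATOMIC);
}

static void my_rx_complete(struct urb *urb, struct pt_regs *regs)
{
	struct my_dev *self = urb->context;	/* hypothetical private data */

	if (urb->status != 0) {
		/* Don't resubmit from here: if the resubmit fails at once we
		 * would recurse.  Give the USB core ~10 ms to settle instead. */
		self->rx_defer_timer.function = my_rx_defer_expired;
		self->rx_defer_timer.data = (unsigned long)urb;
		mod_timer(&self->rx_defer_timer,
			  jiffies + msecs_to_jiffies(10));
		return;
	}

	/* ... normal Rx processing, then resubmit the idle URB ... */
}

The matching del_timer() calls in irda_usb_net_close() and irda_usb_disconnect() above make sure no deferred resubmit fires after the Rx URBs have been killed.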
diff --git a/drivers/net/irda/irda-usb.h b/drivers/net/irda/irda-usb.h
index bd8f66542322..4026af42dd47 100644
--- a/drivers/net/irda/irda-usb.h
+++ b/drivers/net/irda/irda-usb.h
@@ -136,8 +136,6 @@ struct irda_usb_cb {
136 __u16 bulk_out_mtu; /* Max Tx packet size in bytes */ 136 __u16 bulk_out_mtu; /* Max Tx packet size in bytes */
137 __u8 bulk_int_ep; /* Interrupt Endpoint assignments */ 137 __u8 bulk_int_ep; /* Interrupt Endpoint assignments */
138 138
139 wait_queue_head_t wait_q; /* for timeouts */
140
141 struct urb *rx_urb[IU_MAX_RX_URBS]; /* URBs used to receive data frames */ 139 struct urb *rx_urb[IU_MAX_RX_URBS]; /* URBs used to receive data frames */
142 struct urb *idle_rx_urb; /* Pointer to idle URB in Rx path */ 140 struct urb *idle_rx_urb; /* Pointer to idle URB in Rx path */
143 struct urb *tx_urb; /* URB used to send data frames */ 141 struct urb *tx_urb; /* URB used to send data frames */
@@ -147,17 +145,18 @@ struct irda_usb_cb {
147 struct net_device_stats stats; 145 struct net_device_stats stats;
148 struct irlap_cb *irlap; /* The link layer we are binded to */ 146 struct irlap_cb *irlap; /* The link layer we are binded to */
149 struct qos_info qos; 147 struct qos_info qos;
150 hashbin_t *tx_list; /* Queued transmit skb's */
151 char *speed_buff; /* Buffer for speed changes */ 148 char *speed_buff; /* Buffer for speed changes */
152 149
153 struct timeval stamp; 150 struct timeval stamp;
154 struct timeval now; 151 struct timeval now;
155 152
156 spinlock_t lock; /* For serializing operations */ 153 spinlock_t lock; /* For serializing Tx operations */
157 154
158 __u16 xbofs; /* Current xbofs setting */ 155 __u16 xbofs; /* Current xbofs setting */
159 __s16 new_xbofs; /* xbofs we need to set */ 156 __s16 new_xbofs; /* xbofs we need to set */
160 __u32 speed; /* Current speed */ 157 __u32 speed; /* Current speed */
161 __s32 new_speed; /* speed we need to set */ 158 __s32 new_speed; /* speed we need to set */
159
160 struct timer_list rx_defer_timer; /* Wait for Rx error to clear */
162}; 161};
163 162
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
index 6e1018448eea..8cc0d0bbdf50 100644
--- a/drivers/net/r8169.c
+++ b/drivers/net/r8169.c
@@ -287,6 +287,20 @@ enum RTL8169_register_content {
287 TxInterFrameGapShift = 24, 287 TxInterFrameGapShift = 24,
288 TxDMAShift = 8, /* DMA burst value (0-7) is shift this many bits */ 288 TxDMAShift = 8, /* DMA burst value (0-7) is shift this many bits */
289 289
290 /* Config1 register p.24 */
291 PMEnable = (1 << 0), /* Power Management Enable */
292
293 /* Config3 register p.25 */
294 MagicPacket = (1 << 5), /* Wake up when receives a Magic Packet */
295 LinkUp = (1 << 4), /* Wake up when the cable connection is re-established */
296
297 /* Config5 register p.27 */
298 BWF = (1 << 6), /* Accept Broadcast wakeup frame */
299 MWF = (1 << 5), /* Accept Multicast wakeup frame */
300 UWF = (1 << 4), /* Accept Unicast wakeup frame */
301 LanWake = (1 << 1), /* LanWake enable/disable */
302 PMEStatus = (1 << 0), /* PME status can be reset by PCI RST# */
303
290 /* TBICSR p.28 */ 304 /* TBICSR p.28 */
291 TBIReset = 0x80000000, 305 TBIReset = 0x80000000,
292 TBILoopback = 0x40000000, 306 TBILoopback = 0x40000000,
@@ -433,6 +447,7 @@ struct rtl8169_private {
433 unsigned int (*phy_reset_pending)(void __iomem *); 447 unsigned int (*phy_reset_pending)(void __iomem *);
434 unsigned int (*link_ok)(void __iomem *); 448 unsigned int (*link_ok)(void __iomem *);
435 struct work_struct task; 449 struct work_struct task;
450 unsigned wol_enabled : 1;
436}; 451};
437 452
438MODULE_AUTHOR("Realtek and the Linux r8169 crew <netdev@vger.kernel.org>"); 453MODULE_AUTHOR("Realtek and the Linux r8169 crew <netdev@vger.kernel.org>");
@@ -607,6 +622,80 @@ static void rtl8169_link_option(int idx, u8 *autoneg, u16 *speed, u8 *duplex)
607 *duplex = p->duplex; 622 *duplex = p->duplex;
608} 623}
609 624
625static void rtl8169_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
626{
627 struct rtl8169_private *tp = netdev_priv(dev);
628 void __iomem *ioaddr = tp->mmio_addr;
629 u8 options;
630
631 wol->wolopts = 0;
632
633#define WAKE_ANY (WAKE_PHY | WAKE_MAGIC | WAKE_UCAST | WAKE_BCAST | WAKE_MCAST)
634 wol->supported = WAKE_ANY;
635
636 spin_lock_irq(&tp->lock);
637
638 options = RTL_R8(Config1);
639 if (!(options & PMEnable))
640 goto out_unlock;
641
642 options = RTL_R8(Config3);
643 if (options & LinkUp)
644 wol->wolopts |= WAKE_PHY;
645 if (options & MagicPacket)
646 wol->wolopts |= WAKE_MAGIC;
647
648 options = RTL_R8(Config5);
649 if (options & UWF)
650 wol->wolopts |= WAKE_UCAST;
651 if (options & BWF)
652 wol->wolopts |= WAKE_BCAST;
653 if (options & MWF)
654 wol->wolopts |= WAKE_MCAST;
655
656out_unlock:
657 spin_unlock_irq(&tp->lock);
658}
659
660static int rtl8169_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
661{
662 struct rtl8169_private *tp = netdev_priv(dev);
663 void __iomem *ioaddr = tp->mmio_addr;
664 int i;
665 static struct {
666 u32 opt;
667 u16 reg;
668 u8 mask;
669 } cfg[] = {
670 { WAKE_ANY, Config1, PMEnable },
671 { WAKE_PHY, Config3, LinkUp },
672 { WAKE_MAGIC, Config3, MagicPacket },
673 { WAKE_UCAST, Config5, UWF },
674 { WAKE_BCAST, Config5, BWF },
675 { WAKE_MCAST, Config5, MWF },
676 { WAKE_ANY, Config5, LanWake }
677 };
678
679 spin_lock_irq(&tp->lock);
680
681 RTL_W8(Cfg9346, Cfg9346_Unlock);
682
683 for (i = 0; i < ARRAY_SIZE(cfg); i++) {
684 u8 options = RTL_R8(cfg[i].reg) & ~cfg[i].mask;
685 if (wol->wolopts & cfg[i].opt)
686 options |= cfg[i].mask;
687 RTL_W8(cfg[i].reg, options);
688 }
689
690 RTL_W8(Cfg9346, Cfg9346_Lock);
691
692 tp->wol_enabled = (wol->wolopts) ? 1 : 0;
693
694 spin_unlock_irq(&tp->lock);
695
696 return 0;
697}
698
610static void rtl8169_get_drvinfo(struct net_device *dev, 699static void rtl8169_get_drvinfo(struct net_device *dev,
611 struct ethtool_drvinfo *info) 700 struct ethtool_drvinfo *info)
612{ 701{
@@ -1025,6 +1114,8 @@ static struct ethtool_ops rtl8169_ethtool_ops = {
1025 .get_tso = ethtool_op_get_tso, 1114 .get_tso = ethtool_op_get_tso,
1026 .set_tso = ethtool_op_set_tso, 1115 .set_tso = ethtool_op_set_tso,
1027 .get_regs = rtl8169_get_regs, 1116 .get_regs = rtl8169_get_regs,
1117 .get_wol = rtl8169_get_wol,
1118 .set_wol = rtl8169_set_wol,
1028 .get_strings = rtl8169_get_strings, 1119 .get_strings = rtl8169_get_strings,
1029 .get_stats_count = rtl8169_get_stats_count, 1120 .get_stats_count = rtl8169_get_stats_count,
1030 .get_ethtool_stats = rtl8169_get_ethtool_stats, 1121 .get_ethtool_stats = rtl8169_get_ethtool_stats,
@@ -1442,6 +1533,11 @@ rtl8169_init_board(struct pci_dev *pdev, struct net_device **dev_out,
1442 } 1533 }
1443 tp->chipset = i; 1534 tp->chipset = i;
1444 1535
1536 RTL_W8(Cfg9346, Cfg9346_Unlock);
1537 RTL_W8(Config1, RTL_R8(Config1) | PMEnable);
1538 RTL_W8(Config5, RTL_R8(Config5) & PMEStatus);
1539 RTL_W8(Cfg9346, Cfg9346_Lock);
1540
1445 *ioaddr_out = ioaddr; 1541 *ioaddr_out = ioaddr;
1446 *dev_out = dev; 1542 *dev_out = dev;
1447out: 1543out:
@@ -1612,49 +1708,6 @@ rtl8169_remove_one(struct pci_dev *pdev)
1612 pci_set_drvdata(pdev, NULL); 1708 pci_set_drvdata(pdev, NULL);
1613} 1709}
1614 1710
1615#ifdef CONFIG_PM
1616
1617static int rtl8169_suspend(struct pci_dev *pdev, pm_message_t state)
1618{
1619 struct net_device *dev = pci_get_drvdata(pdev);
1620 struct rtl8169_private *tp = netdev_priv(dev);
1621 void __iomem *ioaddr = tp->mmio_addr;
1622 unsigned long flags;
1623
1624 if (!netif_running(dev))
1625 return 0;
1626
1627 netif_device_detach(dev);
1628 netif_stop_queue(dev);
1629 spin_lock_irqsave(&tp->lock, flags);
1630
1631 /* Disable interrupts, stop Rx and Tx */
1632 RTL_W16(IntrMask, 0);
1633 RTL_W8(ChipCmd, 0);
1634
1635 /* Update the error counts. */
1636 tp->stats.rx_missed_errors += RTL_R32(RxMissed);
1637 RTL_W32(RxMissed, 0);
1638 spin_unlock_irqrestore(&tp->lock, flags);
1639
1640 return 0;
1641}
1642
1643static int rtl8169_resume(struct pci_dev *pdev)
1644{
1645 struct net_device *dev = pci_get_drvdata(pdev);
1646
1647 if (!netif_running(dev))
1648 return 0;
1649
1650 netif_device_attach(dev);
1651 rtl8169_hw_start(dev);
1652
1653 return 0;
1654}
1655
1656#endif /* CONFIG_PM */
1657
1658static void rtl8169_set_rxbufsize(struct rtl8169_private *tp, 1711static void rtl8169_set_rxbufsize(struct rtl8169_private *tp,
1659 struct net_device *dev) 1712 struct net_device *dev)
1660{ 1713{
@@ -2700,6 +2753,56 @@ static struct net_device_stats *rtl8169_get_stats(struct net_device *dev)
2700 return &tp->stats; 2753 return &tp->stats;
2701} 2754}
2702 2755
2756#ifdef CONFIG_PM
2757
2758static int rtl8169_suspend(struct pci_dev *pdev, pm_message_t state)
2759{
2760 struct net_device *dev = pci_get_drvdata(pdev);
2761 struct rtl8169_private *tp = netdev_priv(dev);
2762 void __iomem *ioaddr = tp->mmio_addr;
2763
2764 if (!netif_running(dev))
2765 goto out;
2766
2767 netif_device_detach(dev);
2768 netif_stop_queue(dev);
2769
2770 spin_lock_irq(&tp->lock);
2771
2772 rtl8169_asic_down(ioaddr);
2773
2774 tp->stats.rx_missed_errors += RTL_R32(RxMissed);
2775 RTL_W32(RxMissed, 0);
2776
2777 spin_unlock_irq(&tp->lock);
2778
2779 pci_save_state(pdev);
2780 pci_enable_wake(pdev, pci_choose_state(pdev, state), tp->wol_enabled);
2781 pci_set_power_state(pdev, pci_choose_state(pdev, state));
2782out:
2783 return 0;
2784}
2785
2786static int rtl8169_resume(struct pci_dev *pdev)
2787{
2788 struct net_device *dev = pci_get_drvdata(pdev);
2789
2790 if (!netif_running(dev))
2791 goto out;
2792
2793 netif_device_attach(dev);
2794
2795 pci_set_power_state(pdev, PCI_D0);
2796 pci_restore_state(pdev);
2797 pci_enable_wake(pdev, PCI_D0, 0);
2798
2799 rtl8169_schedule_work(dev, rtl8169_reset_task);
2800out:
2801 return 0;
2802}
2803
2804#endif /* CONFIG_PM */
2805
2703static struct pci_driver rtl8169_pci_driver = { 2806static struct pci_driver rtl8169_pci_driver = {
2704 .name = MODULENAME, 2807 .name = MODULENAME,
2705 .id_table = rtl8169_pci_tbl, 2808 .id_table = rtl8169_pci_tbl,
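The r8169 additions wire Wake-on-LAN into ethtool: get_wol/set_wol translate the standard WAKE_* flags to the chip's Config1/Config3/Config5 bits, and the re-added suspend path arms PME through pci_enable_wake() whenever tp->wol_enabled is set. For reference, a minimal userspace sketch of the ioctl interface those hooks service (the interface name "eth0" is an assumption and error handling is trimmed); it does roughly what "ethtool eth0" plus "ethtool -s eth0 wol g" do:

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
	struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
	ifr.ifr_data = (char *)&wol;

	/* Query what the driver's .get_wol reports. */
	if (ioctl(fd, SIOCETHTOOL, &ifr) == 0)
		printf("supported 0x%x, enabled 0x%x\n",
		       wol.supported, wol.wolopts);

	/* Enable magic-packet wake via the driver's .set_wol. */
	wol.cmd = ETHTOOL_SWOL;
	wol.wolopts = WAKE_MAGIC;
	ioctl(fd, SIOCETHTOOL, &ifr);

	close(fd);
	return 0;
}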
diff --git a/drivers/net/sis190.c b/drivers/net/sis190.c
index b420182eec4b..ed4bc91638d2 100644
--- a/drivers/net/sis190.c
+++ b/drivers/net/sis190.c
@@ -1791,6 +1791,8 @@ static int __devinit sis190_init_one(struct pci_dev *pdev,
1791 goto out; 1791 goto out;
1792 } 1792 }
1793 1793
1794 pci_set_drvdata(pdev, dev);
1795
1794 tp = netdev_priv(dev); 1796 tp = netdev_priv(dev);
1795 ioaddr = tp->mmio_addr; 1797 ioaddr = tp->mmio_addr;
1796 1798
@@ -1827,8 +1829,6 @@ static int __devinit sis190_init_one(struct pci_dev *pdev,
1827 if (rc < 0) 1829 if (rc < 0)
1828 goto err_remove_mii; 1830 goto err_remove_mii;
1829 1831
1830 pci_set_drvdata(pdev, dev);
1831
1832 net_probe(tp, KERN_INFO "%s: %s at %p (IRQ: %d), " 1832 net_probe(tp, KERN_INFO "%s: %s at %p (IRQ: %d), "
1833 "%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x\n", 1833 "%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x\n",
1834 pci_name(pdev), sis_chip_info[ent->driver_data].name, 1834 pci_name(pdev), sis_chip_info[ent->driver_data].name,
diff --git a/drivers/net/sis900.c b/drivers/net/sis900.c
index 3d95fa20cd88..7a952fe60be2 100644
--- a/drivers/net/sis900.c
+++ b/drivers/net/sis900.c
@@ -540,7 +540,7 @@ static int __devinit sis900_probe(struct pci_dev *pci_dev,
540 printk("%2.2x.\n", net_dev->dev_addr[i]); 540 printk("%2.2x.\n", net_dev->dev_addr[i]);
541 541
542 /* Detect Wake on Lan support */ 542 /* Detect Wake on Lan support */
543 ret = inl(CFGPMC & PMESP); 543 ret = (inl(net_dev->base_addr + CFGPMC) & PMESP) >> 27;
544 if (netif_msg_probe(sis_priv) && (ret & PME_D3C) == 0) 544 if (netif_msg_probe(sis_priv) && (ret & PME_D3C) == 0)
545 printk(KERN_INFO "%s: Wake on LAN only available from suspend to RAM.", net_dev->name); 545 printk(KERN_INFO "%s: Wake on LAN only available from suspend to RAM.", net_dev->name);
546 546
@@ -2040,7 +2040,7 @@ static int sis900_set_wol(struct net_device *net_dev, struct ethtool_wolinfo *wo
2040 2040
2041 if (wol->wolopts == 0) { 2041 if (wol->wolopts == 0) {
2042 pci_read_config_dword(sis_priv->pci_dev, CFGPMCSR, &cfgpmcsr); 2042 pci_read_config_dword(sis_priv->pci_dev, CFGPMCSR, &cfgpmcsr);
2043 cfgpmcsr |= ~PME_EN; 2043 cfgpmcsr &= ~PME_EN;
2044 pci_write_config_dword(sis_priv->pci_dev, CFGPMCSR, cfgpmcsr); 2044 pci_write_config_dword(sis_priv->pci_dev, CFGPMCSR, cfgpmcsr);
2045 outl(pmctrl_bits, pmctrl_addr); 2045 outl(pmctrl_bits, pmctrl_addr);
2046 if (netif_msg_wol(sis_priv)) 2046 if (netif_msg_wol(sis_priv))
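The second sis900 hunk fixes an operator slip: OR-ing the register with ~PME_EN sets nearly every bit, whereas AND-ing with ~PME_EN clears just the PME_EN flag, which is what disabling Wake-on-LAN requires. A tiny standalone illustration (the bit values are made up, not the real PMCSR layout):

#include <stdio.h>
#include <stdint.h>

#define PME_EN 0x00000100u		/* hypothetical "PME enable" bit */

int main(void)
{
	uint32_t cfgpmcsr = 0x00008103u;

	printf("reg | ~PME_EN = 0x%08x  (wrong: sets almost every bit)\n",
	       (unsigned)(cfgpmcsr | ~PME_EN));
	printf("reg & ~PME_EN = 0x%08x  (right: clears only PME_EN)\n",
	       (unsigned)(cfgpmcsr & ~PME_EN));
	return 0;
}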
diff --git a/drivers/net/skge.c b/drivers/net/skge.c
index bf55a4cfb3d2..25e028b7ce48 100644
--- a/drivers/net/skge.c
+++ b/drivers/net/skge.c
@@ -879,13 +879,12 @@ static int __xm_phy_read(struct skge_hw *hw, int port, u16 reg, u16 *val)
879 int i; 879 int i;
880 880
881 xm_write16(hw, port, XM_PHY_ADDR, reg | hw->phy_addr); 881 xm_write16(hw, port, XM_PHY_ADDR, reg | hw->phy_addr);
882 xm_read16(hw, port, XM_PHY_DATA); 882 *val = xm_read16(hw, port, XM_PHY_DATA);
883 883
884 /* Need to wait for external PHY */
885 for (i = 0; i < PHY_RETRIES; i++) { 884 for (i = 0; i < PHY_RETRIES; i++) {
886 udelay(1);
887 if (xm_read16(hw, port, XM_MMU_CMD) & XM_MMU_PHY_RDY) 885 if (xm_read16(hw, port, XM_MMU_CMD) & XM_MMU_PHY_RDY)
888 goto ready; 886 goto ready;
887 udelay(1);
889 } 888 }
890 889
891 return -ETIMEDOUT; 890 return -ETIMEDOUT;
@@ -918,7 +917,12 @@ static int xm_phy_write(struct skge_hw *hw, int port, u16 reg, u16 val)
918 917
919 ready: 918 ready:
920 xm_write16(hw, port, XM_PHY_DATA, val); 919 xm_write16(hw, port, XM_PHY_DATA, val);
921 return 0; 920 for (i = 0; i < PHY_RETRIES; i++) {
921 if (!(xm_read16(hw, port, XM_MMU_CMD) & XM_MMU_PHY_BUSY))
922 return 0;
923 udelay(1);
924 }
925 return -ETIMEDOUT;
922} 926}
923 927
924static void genesis_init(struct skge_hw *hw) 928static void genesis_init(struct skge_hw *hw)
@@ -1168,13 +1172,17 @@ static void genesis_mac_init(struct skge_hw *hw, int port)
1168 u32 r; 1172 u32 r;
1169 const u8 zero[6] = { 0 }; 1173 const u8 zero[6] = { 0 };
1170 1174
1171 /* Clear MIB counters */ 1175 for (i = 0; i < 10; i++) {
1172 xm_write16(hw, port, XM_STAT_CMD, 1176 skge_write16(hw, SK_REG(port, TX_MFF_CTRL1),
1173 XM_SC_CLR_RXC | XM_SC_CLR_TXC); 1177 MFF_SET_MAC_RST);
1174 /* Clear two times according to Errata #3 */ 1178 if (skge_read16(hw, SK_REG(port, TX_MFF_CTRL1)) & MFF_SET_MAC_RST)
1175 xm_write16(hw, port, XM_STAT_CMD, 1179 goto reset_ok;
1176 XM_SC_CLR_RXC | XM_SC_CLR_TXC); 1180 udelay(1);
1181 }
1177 1182
1183 printk(KERN_WARNING PFX "%s: genesis reset failed\n", dev->name);
1184
1185 reset_ok:
1178 /* Unreset the XMAC. */ 1186 /* Unreset the XMAC. */
1179 skge_write16(hw, SK_REG(port, TX_MFF_CTRL1), MFF_CLR_MAC_RST); 1187 skge_write16(hw, SK_REG(port, TX_MFF_CTRL1), MFF_CLR_MAC_RST);
1180 1188
@@ -1191,7 +1199,7 @@ static void genesis_mac_init(struct skge_hw *hw, int port)
1191 r |= GP_DIR_2|GP_IO_2; 1199 r |= GP_DIR_2|GP_IO_2;
1192 1200
1193 skge_write32(hw, B2_GP_IO, r); 1201 skge_write32(hw, B2_GP_IO, r);
1194 skge_read32(hw, B2_GP_IO); 1202
1195 1203
1196 /* Enable GMII interface */ 1204 /* Enable GMII interface */
1197 xm_write16(hw, port, XM_HW_CFG, XM_HW_GMII_MD); 1205 xm_write16(hw, port, XM_HW_CFG, XM_HW_GMII_MD);
@@ -1205,6 +1213,13 @@ static void genesis_mac_init(struct skge_hw *hw, int port)
1205 for (i = 1; i < 16; i++) 1213 for (i = 1; i < 16; i++)
1206 xm_outaddr(hw, port, XM_EXM(i), zero); 1214 xm_outaddr(hw, port, XM_EXM(i), zero);
1207 1215
1216 /* Clear MIB counters */
1217 xm_write16(hw, port, XM_STAT_CMD,
1218 XM_SC_CLR_RXC | XM_SC_CLR_TXC);
1219 /* Clear two times according to Errata #3 */
1220 xm_write16(hw, port, XM_STAT_CMD,
1221 XM_SC_CLR_RXC | XM_SC_CLR_TXC);
1222
1208 /* configure Rx High Water Mark (XM_RX_HI_WM) */ 1223 /* configure Rx High Water Mark (XM_RX_HI_WM) */
1209 xm_write16(hw, port, XM_RX_HI_WM, 1450); 1224 xm_write16(hw, port, XM_RX_HI_WM, 1450);
1210 1225
@@ -1697,6 +1712,7 @@ static void yukon_mac_init(struct skge_hw *hw, int port)
1697 skge_write32(hw, SK_REG(port, GPHY_CTRL), reg | GPC_RST_SET); 1712 skge_write32(hw, SK_REG(port, GPHY_CTRL), reg | GPC_RST_SET);
1698 skge_write32(hw, SK_REG(port, GPHY_CTRL), reg | GPC_RST_CLR); 1713 skge_write32(hw, SK_REG(port, GPHY_CTRL), reg | GPC_RST_CLR);
1699 skge_write32(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_ON | GMC_RST_CLR); 1714 skge_write32(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_ON | GMC_RST_CLR);
1715
1700 if (skge->autoneg == AUTONEG_DISABLE) { 1716 if (skge->autoneg == AUTONEG_DISABLE) {
1701 reg = GM_GPCR_AU_ALL_DIS; 1717 reg = GM_GPCR_AU_ALL_DIS;
1702 gma_write16(hw, port, GM_GP_CTRL, 1718 gma_write16(hw, port, GM_GP_CTRL,
@@ -1704,16 +1720,23 @@ static void yukon_mac_init(struct skge_hw *hw, int port)
1704 1720
1705 switch (skge->speed) { 1721 switch (skge->speed) {
1706 case SPEED_1000: 1722 case SPEED_1000:
1723 reg &= ~GM_GPCR_SPEED_100;
1707 reg |= GM_GPCR_SPEED_1000; 1724 reg |= GM_GPCR_SPEED_1000;
1708 /* fallthru */ 1725 break;
1709 case SPEED_100: 1726 case SPEED_100:
1727 reg &= ~GM_GPCR_SPEED_1000;
1710 reg |= GM_GPCR_SPEED_100; 1728 reg |= GM_GPCR_SPEED_100;
1729 break;
1730 case SPEED_10:
1731 reg &= ~(GM_GPCR_SPEED_1000 | GM_GPCR_SPEED_100);
1732 break;
1711 } 1733 }
1712 1734
1713 if (skge->duplex == DUPLEX_FULL) 1735 if (skge->duplex == DUPLEX_FULL)
1714 reg |= GM_GPCR_DUP_FULL; 1736 reg |= GM_GPCR_DUP_FULL;
1715 } else 1737 } else
1716 reg = GM_GPCR_SPEED_1000 | GM_GPCR_SPEED_100 | GM_GPCR_DUP_FULL; 1738 reg = GM_GPCR_SPEED_1000 | GM_GPCR_SPEED_100 | GM_GPCR_DUP_FULL;
1739
1717 switch (skge->flow_control) { 1740 switch (skge->flow_control) {
1718 case FLOW_MODE_NONE: 1741 case FLOW_MODE_NONE:
1719 skge_write32(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_OFF); 1742 skge_write32(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_OFF);
@@ -2162,8 +2185,10 @@ static int skge_up(struct net_device *dev)
2162 skge->tx_avail = skge->tx_ring.count - 1; 2185 skge->tx_avail = skge->tx_ring.count - 1;
2163 2186
2164 /* Enable IRQ from port */ 2187 /* Enable IRQ from port */
2188 spin_lock_irq(&hw->hw_lock);
2165 hw->intr_mask |= portirqmask[port]; 2189 hw->intr_mask |= portirqmask[port];
2166 skge_write32(hw, B0_IMSK, hw->intr_mask); 2190 skge_write32(hw, B0_IMSK, hw->intr_mask);
2191 spin_unlock_irq(&hw->hw_lock);
2167 2192
2168 /* Initialize MAC */ 2193 /* Initialize MAC */
2169 spin_lock_bh(&hw->phy_lock); 2194 spin_lock_bh(&hw->phy_lock);
@@ -2221,8 +2246,10 @@ static int skge_down(struct net_device *dev)
2221 else 2246 else
2222 yukon_stop(skge); 2247 yukon_stop(skge);
2223 2248
2249 spin_lock_irq(&hw->hw_lock);
2224 hw->intr_mask &= ~portirqmask[skge->port]; 2250 hw->intr_mask &= ~portirqmask[skge->port];
2225 skge_write32(hw, B0_IMSK, hw->intr_mask); 2251 skge_write32(hw, B0_IMSK, hw->intr_mask);
2252 spin_unlock_irq(&hw->hw_lock);
2226 2253
2227 /* Stop transmitter */ 2254 /* Stop transmitter */
2228 skge_write8(hw, Q_ADDR(txqaddr[port], Q_CSR), CSR_STOP); 2255 skge_write8(hw, Q_ADDR(txqaddr[port], Q_CSR), CSR_STOP);
@@ -2670,8 +2697,7 @@ static int skge_poll(struct net_device *dev, int *budget)
2670 2697
2671 /* restart receiver */ 2698 /* restart receiver */
2672 wmb(); 2699 wmb();
2673 skge_write8(hw, Q_ADDR(rxqaddr[skge->port], Q_CSR), 2700 skge_write8(hw, Q_ADDR(rxqaddr[skge->port], Q_CSR), CSR_START);
2674 CSR_START | CSR_IRQ_CL_F);
2675 2701
2676 *budget -= work_done; 2702 *budget -= work_done;
2677 dev->quota -= work_done; 2703 dev->quota -= work_done;
@@ -2679,10 +2705,11 @@ static int skge_poll(struct net_device *dev, int *budget)
2679 if (work_done >= to_do) 2705 if (work_done >= to_do)
2680 return 1; /* not done */ 2706 return 1; /* not done */
2681 2707
2682 netif_rx_complete(dev); 2708 spin_lock_irq(&hw->hw_lock);
2683 hw->intr_mask |= portirqmask[skge->port]; 2709 __netif_rx_complete(dev);
2684 skge_write32(hw, B0_IMSK, hw->intr_mask); 2710 hw->intr_mask |= portirqmask[skge->port];
2685 skge_read32(hw, B0_IMSK); 2711 skge_write32(hw, B0_IMSK, hw->intr_mask);
2712 spin_unlock_irq(&hw->hw_lock);
2686 2713
2687 return 0; 2714 return 0;
2688} 2715}
@@ -2842,18 +2869,10 @@ static void skge_extirq(unsigned long data)
2842 } 2869 }
2843 spin_unlock(&hw->phy_lock); 2870 spin_unlock(&hw->phy_lock);
2844 2871
2845 local_irq_disable(); 2872 spin_lock_irq(&hw->hw_lock);
2846 hw->intr_mask |= IS_EXT_REG; 2873 hw->intr_mask |= IS_EXT_REG;
2847 skge_write32(hw, B0_IMSK, hw->intr_mask); 2874 skge_write32(hw, B0_IMSK, hw->intr_mask);
2848 local_irq_enable(); 2875 spin_unlock_irq(&hw->hw_lock);
2849}
2850
2851static inline void skge_wakeup(struct net_device *dev)
2852{
2853 struct skge_port *skge = netdev_priv(dev);
2854
2855 prefetch(skge->rx_ring.to_clean);
2856 netif_rx_schedule(dev);
2857} 2876}
2858 2877
2859static irqreturn_t skge_intr(int irq, void *dev_id, struct pt_regs *regs) 2878static irqreturn_t skge_intr(int irq, void *dev_id, struct pt_regs *regs)
@@ -2864,15 +2883,17 @@ static irqreturn_t skge_intr(int irq, void *dev_id, struct pt_regs *regs)
2864 if (status == 0 || status == ~0) /* hotplug or shared irq */ 2883 if (status == 0 || status == ~0) /* hotplug or shared irq */
2865 return IRQ_NONE; 2884 return IRQ_NONE;
2866 2885
2867 status &= hw->intr_mask; 2886 spin_lock(&hw->hw_lock);
2868 if (status & IS_R1_F) { 2887 if (status & IS_R1_F) {
2888 skge_write8(hw, Q_ADDR(Q_R1, Q_CSR), CSR_IRQ_CL_F);
2869 hw->intr_mask &= ~IS_R1_F; 2889 hw->intr_mask &= ~IS_R1_F;
2870 skge_wakeup(hw->dev[0]); 2890 netif_rx_schedule(hw->dev[0]);
2871 } 2891 }
2872 2892
2873 if (status & IS_R2_F) { 2893 if (status & IS_R2_F) {
2894 skge_write8(hw, Q_ADDR(Q_R2, Q_CSR), CSR_IRQ_CL_F);
2874 hw->intr_mask &= ~IS_R2_F; 2895 hw->intr_mask &= ~IS_R2_F;
2875 skge_wakeup(hw->dev[1]); 2896 netif_rx_schedule(hw->dev[1]);
2876 } 2897 }
2877 2898
2878 if (status & IS_XA1_F) 2899 if (status & IS_XA1_F)
@@ -2914,6 +2935,7 @@ static irqreturn_t skge_intr(int irq, void *dev_id, struct pt_regs *regs)
2914 } 2935 }
2915 2936
2916 skge_write32(hw, B0_IMSK, hw->intr_mask); 2937 skge_write32(hw, B0_IMSK, hw->intr_mask);
2938 spin_unlock(&hw->hw_lock);
2917 2939
2918 return IRQ_HANDLED; 2940 return IRQ_HANDLED;
2919} 2941}
@@ -3282,6 +3304,7 @@ static int __devinit skge_probe(struct pci_dev *pdev,
3282 3304
3283 hw->pdev = pdev; 3305 hw->pdev = pdev;
3284 spin_lock_init(&hw->phy_lock); 3306 spin_lock_init(&hw->phy_lock);
3307 spin_lock_init(&hw->hw_lock);
3285 tasklet_init(&hw->ext_tasklet, skge_extirq, (unsigned long) hw); 3308 tasklet_init(&hw->ext_tasklet, skge_extirq, (unsigned long) hw);
3286 3309
3287 hw->regs = ioremap_nocache(pci_resource_start(pdev, 0), 0x4000); 3310 hw->regs = ioremap_nocache(pci_resource_start(pdev, 0), 0x4000);
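The common thread in the skge changes is the new hw->hw_lock: every path that modifies hw->intr_mask and writes B0_IMSK (skge_up/skge_down, the extirq tasklet, the interrupt handler and the NAPI poll completion) now serializes on it, replacing the earlier local_irq_disable()/skge_wakeup() approach. A sketch of that pattern with hypothetical register names and a hypothetical hw structure, not the literal skge code:

#define IRQ_SRC	0x08		/* hypothetical interrupt source register */
#define IRQ_MSK	0x0c		/* hypothetical interrupt mask register */
#define IRQ_RX	0x01		/* hypothetical Rx interrupt bit */

struct my_hw {
	void __iomem *regs;
	u32 intr_mask;		/* software copy of IRQ_MSK */
	spinlock_t hw_lock;	/* guards intr_mask and IRQ_MSK writes */
	struct net_device *dev;
};

static irqreturn_t my_intr(int irq, void *dev_id, struct pt_regs *regs)
{
	struct my_hw *hw = dev_id;
	u32 status = readl(hw->regs + IRQ_SRC);

	if (status == 0 || status == ~0)	/* shared irq or hot-unplug */
		return IRQ_NONE;

	spin_lock(&hw->hw_lock);		/* interrupts already off here */
	if (status & IRQ_RX) {
		hw->intr_mask &= ~IRQ_RX;	/* mask Rx until poll finishes */
		writel(hw->intr_mask, hw->regs + IRQ_MSK);
		netif_rx_schedule(hw->dev);
	}
	spin_unlock(&hw->hw_lock);
	return IRQ_HANDLED;
}

static int my_poll(struct net_device *dev, int *budget)
{
	struct my_hw *hw = netdev_priv(dev);	/* simplification for the sketch */

	/* ... Rx work, budget accounting ... */

	spin_lock_irq(&hw->hw_lock);		/* process context: block irqs */
	__netif_rx_complete(dev);		/* leave polling before unmasking */
	hw->intr_mask |= IRQ_RX;
	writel(hw->intr_mask, hw->regs + IRQ_MSK);
	spin_unlock_irq(&hw->hw_lock);
	return 0;
}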
diff --git a/drivers/net/skge.h b/drivers/net/skge.h
index 2efdacc290e5..941f12a333b6 100644
--- a/drivers/net/skge.h
+++ b/drivers/net/skge.h
@@ -2402,6 +2402,7 @@ struct skge_hw {
2402 2402
2403 struct tasklet_struct ext_tasklet; 2403 struct tasklet_struct ext_tasklet;
2404 spinlock_t phy_lock; 2404 spinlock_t phy_lock;
2405 spinlock_t hw_lock;
2405}; 2406};
2406 2407
2407enum { 2408enum {
diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c
index cae2edf23004..ca8160d68229 100644
--- a/drivers/net/sky2.c
+++ b/drivers/net/sky2.c
@@ -195,11 +195,11 @@ static int sky2_set_power_state(struct sky2_hw *hw, pci_power_t state)
195 pr_debug("sky2_set_power_state %d\n", state); 195 pr_debug("sky2_set_power_state %d\n", state);
196 sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON); 196 sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
197 197
198 pci_read_config_word(hw->pdev, hw->pm_cap + PCI_PM_PMC, &power_control); 198 power_control = sky2_pci_read16(hw, hw->pm_cap + PCI_PM_PMC);
199 vaux = (sky2_read16(hw, B0_CTST) & Y2_VAUX_AVAIL) && 199 vaux = (sky2_read16(hw, B0_CTST) & Y2_VAUX_AVAIL) &&
200 (power_control & PCI_PM_CAP_PME_D3cold); 200 (power_control & PCI_PM_CAP_PME_D3cold);
201 201
202 pci_read_config_word(hw->pdev, hw->pm_cap + PCI_PM_CTRL, &power_control); 202 power_control = sky2_pci_read16(hw, hw->pm_cap + PCI_PM_CTRL);
203 203
204 power_control |= PCI_PM_CTRL_PME_STATUS; 204 power_control |= PCI_PM_CTRL_PME_STATUS;
205 power_control &= ~(PCI_PM_CTRL_STATE_MASK); 205 power_control &= ~(PCI_PM_CTRL_STATE_MASK);
@@ -223,7 +223,7 @@ static int sky2_set_power_state(struct sky2_hw *hw, pci_power_t state)
223 sky2_write8(hw, B2_Y2_CLK_GATE, 0); 223 sky2_write8(hw, B2_Y2_CLK_GATE, 0);
224 224
225 /* Turn off phy power saving */ 225 /* Turn off phy power saving */
226 pci_read_config_dword(hw->pdev, PCI_DEV_REG1, &reg1); 226 reg1 = sky2_pci_read32(hw, PCI_DEV_REG1);
227 reg1 &= ~(PCI_Y2_PHY1_POWD | PCI_Y2_PHY2_POWD); 227 reg1 &= ~(PCI_Y2_PHY1_POWD | PCI_Y2_PHY2_POWD);
228 228
229 /* looks like this XL is back asswards .. */ 229 /* looks like this XL is back asswards .. */
@@ -232,18 +232,28 @@ static int sky2_set_power_state(struct sky2_hw *hw, pci_power_t state)
232 if (hw->ports > 1) 232 if (hw->ports > 1)
233 reg1 |= PCI_Y2_PHY2_COMA; 233 reg1 |= PCI_Y2_PHY2_COMA;
234 } 234 }
235 pci_write_config_dword(hw->pdev, PCI_DEV_REG1, reg1); 235
236 if (hw->chip_id == CHIP_ID_YUKON_EC_U) {
237 sky2_pci_write32(hw, PCI_DEV_REG3, 0);
238 reg1 = sky2_pci_read32(hw, PCI_DEV_REG4);
239 reg1 &= P_ASPM_CONTROL_MSK;
240 sky2_pci_write32(hw, PCI_DEV_REG4, reg1);
241 sky2_pci_write32(hw, PCI_DEV_REG5, 0);
242 }
243
244 sky2_pci_write32(hw, PCI_DEV_REG1, reg1);
245
236 break; 246 break;
237 247
238 case PCI_D3hot: 248 case PCI_D3hot:
239 case PCI_D3cold: 249 case PCI_D3cold:
240 /* Turn on phy power saving */ 250 /* Turn on phy power saving */
241 pci_read_config_dword(hw->pdev, PCI_DEV_REG1, &reg1); 251 reg1 = sky2_pci_read32(hw, PCI_DEV_REG1);
242 if (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev > 1) 252 if (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev > 1)
243 reg1 &= ~(PCI_Y2_PHY1_POWD | PCI_Y2_PHY2_POWD); 253 reg1 &= ~(PCI_Y2_PHY1_POWD | PCI_Y2_PHY2_POWD);
244 else 254 else
245 reg1 |= (PCI_Y2_PHY1_POWD | PCI_Y2_PHY2_POWD); 255 reg1 |= (PCI_Y2_PHY1_POWD | PCI_Y2_PHY2_POWD);
246 pci_write_config_dword(hw->pdev, PCI_DEV_REG1, reg1); 256 sky2_pci_write32(hw, PCI_DEV_REG1, reg1);
247 257
248 if (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev > 1) 258 if (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev > 1)
249 sky2_write8(hw, B2_Y2_CLK_GATE, 0); 259 sky2_write8(hw, B2_Y2_CLK_GATE, 0);
@@ -265,7 +275,7 @@ static int sky2_set_power_state(struct sky2_hw *hw, pci_power_t state)
265 ret = -1; 275 ret = -1;
266 } 276 }
267 277
268 pci_write_config_byte(hw->pdev, hw->pm_cap + PCI_PM_CTRL, power_control); 278 sky2_pci_write16(hw, hw->pm_cap + PCI_PM_CTRL, power_control);
269 sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF); 279 sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
270 return ret; 280 return ret;
271} 281}
@@ -463,16 +473,31 @@ static void sky2_phy_init(struct sky2_hw *hw, unsigned port)
463 ledover |= PHY_M_LED_MO_RX(MO_LED_OFF); 473 ledover |= PHY_M_LED_MO_RX(MO_LED_OFF);
464 } 474 }
465 475
466 gm_phy_write(hw, port, PHY_MARV_LED_CTRL, ledctrl); 476 if (hw->chip_id == CHIP_ID_YUKON_EC_U && hw->chip_rev >= 2) {
477 /* apply fixes in PHY AFE */
478 gm_phy_write(hw, port, 22, 255);
479 /* increase differential signal amplitude in 10BASE-T */
480 gm_phy_write(hw, port, 24, 0xaa99);
481 gm_phy_write(hw, port, 23, 0x2011);
467 482
468 if (sky2->autoneg == AUTONEG_DISABLE || sky2->speed == SPEED_100) { 483 /* fix for IEEE A/B Symmetry failure in 1000BASE-T */
469 /* turn on 100 Mbps LED (LED_LINK100) */ 484 gm_phy_write(hw, port, 24, 0xa204);
470 ledover |= PHY_M_LED_MO_100(MO_LED_ON); 485 gm_phy_write(hw, port, 23, 0x2002);
471 }
472 486
473 if (ledover) 487 /* set page register to 0 */
474 gm_phy_write(hw, port, PHY_MARV_LED_OVER, ledover); 488 gm_phy_write(hw, port, 22, 0);
489 } else {
490 gm_phy_write(hw, port, PHY_MARV_LED_CTRL, ledctrl);
491
492 if (sky2->autoneg == AUTONEG_DISABLE || sky2->speed == SPEED_100) {
493 /* turn on 100 Mbps LED (LED_LINK100) */
494 ledover |= PHY_M_LED_MO_100(MO_LED_ON);
495 }
496
497 if (ledover)
498 gm_phy_write(hw, port, PHY_MARV_LED_OVER, ledover);
475 499
500 }
476 /* Enable phy interrupt on auto-negotiation complete (or link up) */ 501 /* Enable phy interrupt on auto-negotiation complete (or link up) */
477 if (sky2->autoneg == AUTONEG_ENABLE) 502 if (sky2->autoneg == AUTONEG_ENABLE)
478 gm_phy_write(hw, port, PHY_MARV_INT_MASK, PHY_M_IS_AN_COMPL); 503 gm_phy_write(hw, port, PHY_MARV_INT_MASK, PHY_M_IS_AN_COMPL);
@@ -520,10 +545,16 @@ static void sky2_mac_init(struct sky2_hw *hw, unsigned port)
520 545
521 switch (sky2->speed) { 546 switch (sky2->speed) {
522 case SPEED_1000: 547 case SPEED_1000:
548 reg &= ~GM_GPCR_SPEED_100;
523 reg |= GM_GPCR_SPEED_1000; 549 reg |= GM_GPCR_SPEED_1000;
524 /* fallthru */ 550 break;
525 case SPEED_100: 551 case SPEED_100:
552 reg &= ~GM_GPCR_SPEED_1000;
526 reg |= GM_GPCR_SPEED_100; 553 reg |= GM_GPCR_SPEED_100;
554 break;
555 case SPEED_10:
556 reg &= ~(GM_GPCR_SPEED_1000 | GM_GPCR_SPEED_100);
557 break;
527 } 558 }
528 559
529 if (sky2->duplex == DUPLEX_FULL) 560 if (sky2->duplex == DUPLEX_FULL)
@@ -947,6 +978,12 @@ static int sky2_rx_start(struct sky2_port *sky2)
947 978
948 sky2->rx_put = sky2->rx_next = 0; 979 sky2->rx_put = sky2->rx_next = 0;
949 sky2_qset(hw, rxq); 980 sky2_qset(hw, rxq);
981
982 if (hw->chip_id == CHIP_ID_YUKON_EC_U && hw->chip_rev >= 2) {
983 /* MAC Rx RAM Read is controlled by hardware */
984 sky2_write32(hw, Q_ADDR(rxq, Q_F), F_M_RX_RAM_DIS);
985 }
986
950 sky2_prefetch_init(hw, rxq, sky2->rx_le_map, RX_LE_SIZE - 1); 987 sky2_prefetch_init(hw, rxq, sky2->rx_le_map, RX_LE_SIZE - 1);
951 988
952 rx_set_checksum(sky2); 989 rx_set_checksum(sky2);
@@ -1029,9 +1066,10 @@ static int sky2_up(struct net_device *dev)
1029 RB_RST_SET); 1066 RB_RST_SET);
1030 1067
1031 sky2_qset(hw, txqaddr[port]); 1068 sky2_qset(hw, txqaddr[port]);
1032 if (hw->chip_id == CHIP_ID_YUKON_EC_U)
1033 sky2_write16(hw, Q_ADDR(txqaddr[port], Q_AL), 0x1a0);
1034 1069
1070 /* Set almost empty threshold */
1071 if (hw->chip_id == CHIP_ID_YUKON_EC_U && hw->chip_rev == 1)
1072 sky2_write16(hw, Q_ADDR(txqaddr[port], Q_AL), 0x1a0);
1035 1073
1036 sky2_prefetch_init(hw, txqaddr[port], sky2->tx_le_map, 1074 sky2_prefetch_init(hw, txqaddr[port], sky2->tx_le_map,
1037 TX_RING_SIZE - 1); 1075 TX_RING_SIZE - 1);
@@ -1041,8 +1079,10 @@ static int sky2_up(struct net_device *dev)
1041 goto err_out; 1079 goto err_out;
1042 1080
1043 /* Enable interrupts from phy/mac for port */ 1081 /* Enable interrupts from phy/mac for port */
1082 spin_lock_irq(&hw->hw_lock);
1044 hw->intr_mask |= (port == 0) ? Y2_IS_PORT_1 : Y2_IS_PORT_2; 1083 hw->intr_mask |= (port == 0) ? Y2_IS_PORT_1 : Y2_IS_PORT_2;
1045 sky2_write32(hw, B0_IMSK, hw->intr_mask); 1084 sky2_write32(hw, B0_IMSK, hw->intr_mask);
1085 spin_unlock_irq(&hw->hw_lock);
1046 return 0; 1086 return 0;
1047 1087
1048err_out: 1088err_out:
@@ -1342,10 +1382,10 @@ static int sky2_down(struct net_device *dev)
1342 netif_stop_queue(dev); 1382 netif_stop_queue(dev);
1343 1383
1344 /* Disable port IRQ */ 1384 /* Disable port IRQ */
1345 local_irq_disable(); 1385 spin_lock_irq(&hw->hw_lock);
1346 hw->intr_mask &= ~((sky2->port == 0) ? Y2_IS_IRQ_PHY1 : Y2_IS_IRQ_PHY2); 1386 hw->intr_mask &= ~((sky2->port == 0) ? Y2_IS_IRQ_PHY1 : Y2_IS_IRQ_PHY2);
1347 sky2_write32(hw, B0_IMSK, hw->intr_mask); 1387 sky2_write32(hw, B0_IMSK, hw->intr_mask);
1348 local_irq_enable(); 1388 spin_unlock_irq(&hw->hw_lock);
1349 1389
1350 flush_scheduled_work(); 1390 flush_scheduled_work();
1351 1391
@@ -1446,6 +1486,29 @@ static void sky2_link_up(struct sky2_port *sky2)
1446 sky2_write8(hw, SK_REG(port, GMAC_IRQ_MSK), GMAC_DEF_MSK); 1486 sky2_write8(hw, SK_REG(port, GMAC_IRQ_MSK), GMAC_DEF_MSK);
1447 1487
1448 reg = gma_read16(hw, port, GM_GP_CTRL); 1488 reg = gma_read16(hw, port, GM_GP_CTRL);
1489 if (sky2->autoneg == AUTONEG_DISABLE) {
1490 reg |= GM_GPCR_AU_ALL_DIS;
1491
1492 /* Is write/read necessary? Copied from sky2_mac_init */
1493 gma_write16(hw, port, GM_GP_CTRL, reg);
1494 gma_read16(hw, port, GM_GP_CTRL);
1495
1496 switch (sky2->speed) {
1497 case SPEED_1000:
1498 reg &= ~GM_GPCR_SPEED_100;
1499 reg |= GM_GPCR_SPEED_1000;
1500 break;
1501 case SPEED_100:
1502 reg &= ~GM_GPCR_SPEED_1000;
1503 reg |= GM_GPCR_SPEED_100;
1504 break;
1505 case SPEED_10:
1506 reg &= ~(GM_GPCR_SPEED_1000 | GM_GPCR_SPEED_100);
1507 break;
1508 }
1509 } else
1510 reg &= ~GM_GPCR_AU_ALL_DIS;
1511
1449 if (sky2->duplex == DUPLEX_FULL || sky2->autoneg == AUTONEG_ENABLE) 1512 if (sky2->duplex == DUPLEX_FULL || sky2->autoneg == AUTONEG_ENABLE)
1450 reg |= GM_GPCR_DUP_FULL; 1513 reg |= GM_GPCR_DUP_FULL;
1451 1514
@@ -1604,10 +1667,10 @@ static void sky2_phy_task(void *arg)
1604out: 1667out:
1605 up(&sky2->phy_sema); 1668 up(&sky2->phy_sema);
1606 1669
1607 local_irq_disable(); 1670 spin_lock_irq(&hw->hw_lock);
1608 hw->intr_mask |= (sky2->port == 0) ? Y2_IS_IRQ_PHY1 : Y2_IS_IRQ_PHY2; 1671 hw->intr_mask |= (sky2->port == 0) ? Y2_IS_IRQ_PHY1 : Y2_IS_IRQ_PHY2;
1609 sky2_write32(hw, B0_IMSK, hw->intr_mask); 1672 sky2_write32(hw, B0_IMSK, hw->intr_mask);
1610 local_irq_enable(); 1673 spin_unlock_irq(&hw->hw_lock);
1611} 1674}
1612 1675
1613 1676
@@ -1834,6 +1897,17 @@ static int sky2_poll(struct net_device *dev0, int *budget)
1834 1897
1835 sky2_write32(hw, STAT_CTRL, SC_STAT_CLR_IRQ); 1898 sky2_write32(hw, STAT_CTRL, SC_STAT_CLR_IRQ);
1836 1899
1900 /*
1901 * Kick the STAT_LEV_TIMER_CTRL timer.
1902 * This fixes my hangs on Yukon-EC (0xb6) rev 1.
1903 * The if clause is there to start the timer only if it has been
1904 * configured correctly and not been disabled via ethtool.
1905 */
1906 if (sky2_read8(hw, STAT_LEV_TIMER_CTRL) == TIM_START) {
1907 sky2_write8(hw, STAT_LEV_TIMER_CTRL, TIM_STOP);
1908 sky2_write8(hw, STAT_LEV_TIMER_CTRL, TIM_START);
1909 }
1910
1837 hwidx = sky2_read16(hw, STAT_PUT_IDX); 1911 hwidx = sky2_read16(hw, STAT_PUT_IDX);
1838 BUG_ON(hwidx >= STATUS_RING_SIZE); 1912 BUG_ON(hwidx >= STATUS_RING_SIZE);
1839 rmb(); 1913 rmb();
@@ -1916,16 +1990,19 @@ exit_loop:
1916 sky2_tx_check(hw, 0, tx_done[0]); 1990 sky2_tx_check(hw, 0, tx_done[0]);
1917 sky2_tx_check(hw, 1, tx_done[1]); 1991 sky2_tx_check(hw, 1, tx_done[1]);
1918 1992
1993 if (sky2_read8(hw, STAT_TX_TIMER_CTRL) == TIM_START) {
1994 sky2_write8(hw, STAT_TX_TIMER_CTRL, TIM_STOP);
1995 sky2_write8(hw, STAT_TX_TIMER_CTRL, TIM_START);
1996 }
1997
1919 if (likely(work_done < to_do)) { 1998 if (likely(work_done < to_do)) {
1920 /* need to restart TX timer */ 1999 spin_lock_irq(&hw->hw_lock);
1921 if (is_ec_a1(hw)) { 2000 __netif_rx_complete(dev0);
1922 sky2_write8(hw, STAT_TX_TIMER_CTRL, TIM_STOP);
1923 sky2_write8(hw, STAT_TX_TIMER_CTRL, TIM_START);
1924 }
1925 2001
1926 netif_rx_complete(dev0);
1927 hw->intr_mask |= Y2_IS_STAT_BMU; 2002 hw->intr_mask |= Y2_IS_STAT_BMU;
1928 sky2_write32(hw, B0_IMSK, hw->intr_mask); 2003 sky2_write32(hw, B0_IMSK, hw->intr_mask);
2004 spin_unlock_irq(&hw->hw_lock);
2005
1929 return 0; 2006 return 0;
1930 } else { 2007 } else {
1931 *budget -= work_done; 2008 *budget -= work_done;
@@ -1988,13 +2065,13 @@ static void sky2_hw_intr(struct sky2_hw *hw)
1988 if (status & (Y2_IS_MST_ERR | Y2_IS_IRQ_STAT)) { 2065 if (status & (Y2_IS_MST_ERR | Y2_IS_IRQ_STAT)) {
1989 u16 pci_err; 2066 u16 pci_err;
1990 2067
1991 pci_read_config_word(hw->pdev, PCI_STATUS, &pci_err); 2068 pci_err = sky2_pci_read16(hw, PCI_STATUS);
1992 if (net_ratelimit()) 2069 if (net_ratelimit())
1993 printk(KERN_ERR PFX "%s: pci hw error (0x%x)\n", 2070 printk(KERN_ERR PFX "%s: pci hw error (0x%x)\n",
1994 pci_name(hw->pdev), pci_err); 2071 pci_name(hw->pdev), pci_err);
1995 2072
1996 sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON); 2073 sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
1997 pci_write_config_word(hw->pdev, PCI_STATUS, 2074 sky2_pci_write16(hw, PCI_STATUS,
1998 pci_err | PCI_STATUS_ERROR_BITS); 2075 pci_err | PCI_STATUS_ERROR_BITS);
1999 sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF); 2076 sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
2000 } 2077 }
@@ -2003,7 +2080,7 @@ static void sky2_hw_intr(struct sky2_hw *hw)
2003 /* PCI-Express uncorrectable Error occurred */ 2080 /* PCI-Express uncorrectable Error occurred */
2004 u32 pex_err; 2081 u32 pex_err;
2005 2082
2006 pci_read_config_dword(hw->pdev, PEX_UNC_ERR_STAT, &pex_err); 2083 pex_err = sky2_pci_read32(hw, PEX_UNC_ERR_STAT);
2007 2084
2008 if (net_ratelimit()) 2085 if (net_ratelimit())
2009 printk(KERN_ERR PFX "%s: pci express error (0x%x)\n", 2086 printk(KERN_ERR PFX "%s: pci express error (0x%x)\n",
@@ -2011,7 +2088,7 @@ static void sky2_hw_intr(struct sky2_hw *hw)
2011 2088
2012 /* clear the interrupt */ 2089 /* clear the interrupt */
2013 sky2_write32(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON); 2090 sky2_write32(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
2014 pci_write_config_dword(hw->pdev, PEX_UNC_ERR_STAT, 2091 sky2_pci_write32(hw, PEX_UNC_ERR_STAT,
2015 0xffffffffUL); 2092 0xffffffffUL);
2016 sky2_write32(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF); 2093 sky2_write32(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
2017 2094
@@ -2057,6 +2134,7 @@ static void sky2_phy_intr(struct sky2_hw *hw, unsigned port)
2057 2134
2058 hw->intr_mask &= ~(port == 0 ? Y2_IS_IRQ_PHY1 : Y2_IS_IRQ_PHY2); 2135 hw->intr_mask &= ~(port == 0 ? Y2_IS_IRQ_PHY1 : Y2_IS_IRQ_PHY2);
2059 sky2_write32(hw, B0_IMSK, hw->intr_mask); 2136 sky2_write32(hw, B0_IMSK, hw->intr_mask);
2137
2060 schedule_work(&sky2->phy_task); 2138 schedule_work(&sky2->phy_task);
2061} 2139}
2062 2140
@@ -2070,6 +2148,7 @@ static irqreturn_t sky2_intr(int irq, void *dev_id, struct pt_regs *regs)
2070 if (status == 0 || status == ~0) 2148 if (status == 0 || status == ~0)
2071 return IRQ_NONE; 2149 return IRQ_NONE;
2072 2150
2151 spin_lock(&hw->hw_lock);
2073 if (status & Y2_IS_HW_ERR) 2152 if (status & Y2_IS_HW_ERR)
2074 sky2_hw_intr(hw); 2153 sky2_hw_intr(hw);
2075 2154
@@ -2098,7 +2177,7 @@ static irqreturn_t sky2_intr(int irq, void *dev_id, struct pt_regs *regs)
2098 2177
2099 sky2_write32(hw, B0_Y2_SP_ICR, 2); 2178 sky2_write32(hw, B0_Y2_SP_ICR, 2);
2100 2179
2101 sky2_read32(hw, B0_IMSK); 2180 spin_unlock(&hw->hw_lock);
2102 2181
2103 return IRQ_HANDLED; 2182 return IRQ_HANDLED;
2104} 2183}
@@ -2141,7 +2220,7 @@ static int sky2_reset(struct sky2_hw *hw)
2141{ 2220{
2142 u16 status; 2221 u16 status;
2143 u8 t8, pmd_type; 2222 u8 t8, pmd_type;
2144 int i, err; 2223 int i;
2145 2224
2146 sky2_write8(hw, B0_CTST, CS_RST_CLR); 2225 sky2_write8(hw, B0_CTST, CS_RST_CLR);
2147 2226
@@ -2163,25 +2242,18 @@ static int sky2_reset(struct sky2_hw *hw)
2163 sky2_write8(hw, B0_CTST, CS_RST_CLR); 2242 sky2_write8(hw, B0_CTST, CS_RST_CLR);
2164 2243
2165 /* clear PCI errors, if any */ 2244 /* clear PCI errors, if any */
2166 err = pci_read_config_word(hw->pdev, PCI_STATUS, &status); 2245 status = sky2_pci_read16(hw, PCI_STATUS);
2167 if (err)
2168 goto pci_err;
2169 2246
2170 sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON); 2247 sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
2171 err = pci_write_config_word(hw->pdev, PCI_STATUS, 2248 sky2_pci_write16(hw, PCI_STATUS, status | PCI_STATUS_ERROR_BITS);
2172 status | PCI_STATUS_ERROR_BITS); 2249
2173 if (err)
2174 goto pci_err;
2175 2250
2176 sky2_write8(hw, B0_CTST, CS_MRST_CLR); 2251 sky2_write8(hw, B0_CTST, CS_MRST_CLR);
2177 2252
2178 /* clear any PEX errors */ 2253 /* clear any PEX errors */
2179 if (pci_find_capability(hw->pdev, PCI_CAP_ID_EXP)) { 2254 if (pci_find_capability(hw->pdev, PCI_CAP_ID_EXP))
2180 err = pci_write_config_dword(hw->pdev, PEX_UNC_ERR_STAT, 2255 sky2_pci_write32(hw, PEX_UNC_ERR_STAT, 0xffffffffUL);
2181 0xffffffffUL); 2256
2182 if (err)
2183 goto pci_err;
2184 }
2185 2257
2186 pmd_type = sky2_read8(hw, B2_PMD_TYP); 2258 pmd_type = sky2_read8(hw, B2_PMD_TYP);
2187 hw->copper = !(pmd_type == 'L' || pmd_type == 'S'); 2259 hw->copper = !(pmd_type == 'L' || pmd_type == 'S');
@@ -2280,8 +2352,7 @@ static int sky2_reset(struct sky2_hw *hw)
2280 sky2_write8(hw, STAT_FIFO_ISR_WM, 16); 2352 sky2_write8(hw, STAT_FIFO_ISR_WM, 16);
2281 2353
2282 sky2_write32(hw, STAT_TX_TIMER_INI, sky2_us2clk(hw, 1000)); 2354 sky2_write32(hw, STAT_TX_TIMER_INI, sky2_us2clk(hw, 1000));
2283 sky2_write32(hw, STAT_LEV_TIMER_INI, sky2_us2clk(hw, 100)); 2355 sky2_write32(hw, STAT_ISR_TIMER_INI, sky2_us2clk(hw, 7));
2284 sky2_write32(hw, STAT_ISR_TIMER_INI, sky2_us2clk(hw, 20));
2285 } 2356 }
2286 2357
2287 /* enable status unit */ 2358 /* enable status unit */
@@ -2292,14 +2363,6 @@ static int sky2_reset(struct sky2_hw *hw)
2292 sky2_write8(hw, STAT_ISR_TIMER_CTRL, TIM_START); 2363 sky2_write8(hw, STAT_ISR_TIMER_CTRL, TIM_START);
2293 2364
2294 return 0; 2365 return 0;
2295
2296pci_err:
2297 /* This is to catch a BIOS bug workaround where
2298 * mmconfig table doesn't have other buses.
2299 */
2300 printk(KERN_ERR PFX "%s: can't access PCI config space\n",
2301 pci_name(hw->pdev));
2302 return err;
2303} 2366}
2304 2367
2305static u32 sky2_supported_modes(const struct sky2_hw *hw) 2368static u32 sky2_supported_modes(const struct sky2_hw *hw)
@@ -2823,11 +2886,11 @@ static int sky2_set_coalesce(struct net_device *dev,
2823 (ecmd->rx_coalesce_usecs_irq < tmin || ecmd->rx_coalesce_usecs_irq > tmax)) 2886 (ecmd->rx_coalesce_usecs_irq < tmin || ecmd->rx_coalesce_usecs_irq > tmax))
2824 return -EINVAL; 2887 return -EINVAL;
2825 2888
2826 if (ecmd->tx_max_coalesced_frames > 0xffff) 2889 if (ecmd->tx_max_coalesced_frames >= TX_RING_SIZE-1)
2827 return -EINVAL; 2890 return -EINVAL;
2828 if (ecmd->rx_max_coalesced_frames > 0xff) 2891 if (ecmd->rx_max_coalesced_frames > RX_MAX_PENDING)
2829 return -EINVAL; 2892 return -EINVAL;
2830 if (ecmd->rx_max_coalesced_frames_irq > 0xff) 2893 if (ecmd->rx_max_coalesced_frames_irq > RX_MAX_PENDING)
2831 return -EINVAL; 2894 return -EINVAL;
2832 2895
2833 if (ecmd->tx_coalesce_usecs == 0) 2896 if (ecmd->tx_coalesce_usecs == 0)
@@ -3169,17 +3232,6 @@ static int __devinit sky2_probe(struct pci_dev *pdev,
3169 } 3232 }
3170 } 3233 }
3171 3234
3172#ifdef __BIG_ENDIAN
3173 /* byte swap descriptors in hardware */
3174 {
3175 u32 reg;
3176
3177 pci_read_config_dword(pdev, PCI_DEV_REG2, &reg);
3178 reg |= PCI_REV_DESC;
3179 pci_write_config_dword(pdev, PCI_DEV_REG2, reg);
3180 }
3181#endif
3182
3183 err = -ENOMEM; 3235 err = -ENOMEM;
3184 hw = kzalloc(sizeof(*hw), GFP_KERNEL); 3236 hw = kzalloc(sizeof(*hw), GFP_KERNEL);
3185 if (!hw) { 3237 if (!hw) {
@@ -3197,6 +3249,18 @@ static int __devinit sky2_probe(struct pci_dev *pdev,
3197 goto err_out_free_hw; 3249 goto err_out_free_hw;
3198 } 3250 }
3199 hw->pm_cap = pm_cap; 3251 hw->pm_cap = pm_cap;
3252 spin_lock_init(&hw->hw_lock);
3253
3254#ifdef __BIG_ENDIAN
3255 /* byte swap descriptors in hardware */
3256 {
3257 u32 reg;
3258
3259 reg = sky2_pci_read32(hw, PCI_DEV_REG2);
3260 reg |= PCI_REV_DESC;
3261 sky2_pci_write32(hw, PCI_DEV_REG2, reg);
3262 }
3263#endif
3200 3264
3201 /* ring for status responses */ 3265 /* ring for status responses */
3202 hw->st_le = pci_alloc_consistent(hw->pdev, STATUS_LE_BYTES, 3266 hw->st_le = pci_alloc_consistent(hw->pdev, STATUS_LE_BYTES,
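Throughout sky2.c, pci_read_config_*/pci_write_config_* calls are replaced by sky2_pci_read*/sky2_pci_write* helpers that go through the chip's memory-mapped view of its PCI configuration space, which is why the old pci_err: error plumbing in sky2_reset() can be dropped. A hedged sketch of what such accessors typically look like; the window offset and the names below are assumptions for illustration, not necessarily the driver's actual definitions:

#define CFG_WINDOW	0x1c00	/* assumed offset of the mapped PCI config window */

struct my_hw {
	void __iomem *regs;	/* BAR mapping, as in struct sky2_hw */
};

static inline u32 my_pci_read32(const struct my_hw *hw, unsigned reg)
{
	return readl(hw->regs + CFG_WINDOW + reg);
}

static inline void my_pci_write32(struct my_hw *hw, unsigned reg, u32 val)
{
	writel(val, hw->regs + CFG_WINDOW + reg);
}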
diff --git a/drivers/net/sky2.h b/drivers/net/sky2.h
index fd12c289a238..3edb98075e0a 100644
--- a/drivers/net/sky2.h
+++ b/drivers/net/sky2.h
@@ -5,14 +5,22 @@
5#define _SKY2_H 5#define _SKY2_H
6 6
7/* PCI config registers */ 7/* PCI config registers */
8#define PCI_DEV_REG1 0x40 8enum {
9#define PCI_DEV_REG2 0x44 9 PCI_DEV_REG1 = 0x40,
10#define PCI_DEV_STATUS 0x7c 10 PCI_DEV_REG2 = 0x44,
11#define PCI_OS_PCI_X (1<<26) 11 PCI_DEV_STATUS = 0x7c,
12 PCI_DEV_REG3 = 0x80,
13 PCI_DEV_REG4 = 0x84,
14 PCI_DEV_REG5 = 0x88,
15};
12 16
13#define PEX_LNK_STAT 0xf2 17enum {
14#define PEX_UNC_ERR_STAT 0x104 18 PEX_DEV_CAP = 0xe4,
15#define PEX_DEV_CTRL 0xe8 19 PEX_DEV_CTRL = 0xe8,
20 PEX_DEV_STA = 0xea,
21 PEX_LNK_STAT = 0xf2,
22 PEX_UNC_ERR_STAT= 0x104,
23};
16 24
17/* Yukon-2 */ 25/* Yukon-2 */
18enum pci_dev_reg_1 { 26enum pci_dev_reg_1 {
@@ -37,6 +45,25 @@ enum pci_dev_reg_2 {
37 PCI_USEDATA64 = 1<<0, /* Use 64Bit Data bus ext */ 45 PCI_USEDATA64 = 1<<0, /* Use 64Bit Data bus ext */
38}; 46};
39 47
48/* PCI_OUR_REG_4 32 bit Our Register 4 (Yukon-ECU only) */
49enum pci_dev_reg_4 {
50 /* (Link Training & Status State Machine) */
51 P_TIMER_VALUE_MSK = 0xffL<<16, /* Bit 23..16: Timer Value Mask */
52 /* (Active State Power Management) */
53 P_FORCE_ASPM_REQUEST = 1<<15, /* Force ASPM Request (A1 only) */
54 P_ASPM_GPHY_LINK_DOWN = 1<<14, /* GPHY Link Down (A1 only) */
55 P_ASPM_INT_FIFO_EMPTY = 1<<13, /* Internal FIFO Empty (A1 only) */
56 P_ASPM_CLKRUN_REQUEST = 1<<12, /* CLKRUN Request (A1 only) */
57
58 P_ASPM_FORCE_CLKREQ_ENA = 1<<4, /* Force CLKREQ Enable (A1b only) */
59 P_ASPM_CLKREQ_PAD_CTL = 1<<3, /* CLKREQ PAD Control (A1 only) */
60 P_ASPM_A1_MODE_SELECT = 1<<2, /* A1 Mode Select (A1 only) */
61 P_CLK_GATE_PEX_UNIT_ENA = 1<<1, /* Enable Gate PEX Unit Clock */
62 P_CLK_GATE_ROOT_COR_ENA = 1<<0, /* Enable Gate Root Core Clock */
63 P_ASPM_CONTROL_MSK = P_FORCE_ASPM_REQUEST | P_ASPM_GPHY_LINK_DOWN
64 | P_ASPM_CLKRUN_REQUEST | P_ASPM_INT_FIFO_EMPTY,
65};
66
40 67
41#define PCI_STATUS_ERROR_BITS (PCI_STATUS_DETECTED_PARITY | \ 68#define PCI_STATUS_ERROR_BITS (PCI_STATUS_DETECTED_PARITY | \
42 PCI_STATUS_SIG_SYSTEM_ERROR | \ 69 PCI_STATUS_SIG_SYSTEM_ERROR | \
@@ -507,6 +534,16 @@ enum {
507}; 534};
508#define Q_ADDR(reg, offs) (B8_Q_REGS + (reg) + (offs)) 535#define Q_ADDR(reg, offs) (B8_Q_REGS + (reg) + (offs))
509 536
537/* Q_F 32 bit Flag Register */
538enum {
539 F_ALM_FULL = 1<<27, /* Rx FIFO: almost full */
540 F_EMPTY = 1<<27, /* Tx FIFO: empty flag */
541 F_FIFO_EOF = 1<<26, /* Tag (EOF Flag) bit in FIFO */
542 F_WM_REACHED = 1<<25, /* Watermark reached */
543 F_M_RX_RAM_DIS = 1<<24, /* MAC Rx RAM Read Port disable */
544 F_FIFO_LEVEL = 0x1fL<<16, /* Bit 23..16: # of Qwords in FIFO */
545 F_WATER_MARK = 0x0007ffL, /* Bit 10.. 0: Watermark */
546};
510 547
511/* Queue Prefetch Unit Offsets, use Y2_QADDR() to address (Yukon-2 only)*/ 548/* Queue Prefetch Unit Offsets, use Y2_QADDR() to address (Yukon-2 only)*/
512enum { 549enum {
@@ -909,10 +946,12 @@ enum {
909 PHY_BCOM_ID1_C0 = 0x6044, 946 PHY_BCOM_ID1_C0 = 0x6044,
910 PHY_BCOM_ID1_C5 = 0x6047, 947 PHY_BCOM_ID1_C5 = 0x6047,
911 948
912 PHY_MARV_ID1_B0 = 0x0C23, /* Yukon (PHY 88E1011) */ 949 PHY_MARV_ID1_B0 = 0x0C23, /* Yukon (PHY 88E1011) */
913 PHY_MARV_ID1_B2 = 0x0C25, /* Yukon-Plus (PHY 88E1011) */ 950 PHY_MARV_ID1_B2 = 0x0C25, /* Yukon-Plus (PHY 88E1011) */
914 PHY_MARV_ID1_C2 = 0x0CC2, /* Yukon-EC (PHY 88E1111) */ 951 PHY_MARV_ID1_C2 = 0x0CC2, /* Yukon-EC (PHY 88E1111) */
915 PHY_MARV_ID1_Y2 = 0x0C91, /* Yukon-2 (PHY 88E1112) */ 952 PHY_MARV_ID1_Y2 = 0x0C91, /* Yukon-2 (PHY 88E1112) */
953 PHY_MARV_ID1_FE = 0x0C83, /* Yukon-FE (PHY 88E3082 Rev.A1) */
954 PHY_MARV_ID1_ECU= 0x0CB0, /* Yukon-ECU (PHY 88E1149 Rev.B2?) */
916}; 955};
917 956
918/* Advertisement register bits */ 957/* Advertisement register bits */
@@ -1837,8 +1876,9 @@ struct sky2_port {
1837struct sky2_hw { 1876struct sky2_hw {
1838 void __iomem *regs; 1877 void __iomem *regs;
1839 struct pci_dev *pdev; 1878 struct pci_dev *pdev;
1840 u32 intr_mask;
1841 struct net_device *dev[2]; 1879 struct net_device *dev[2];
1880 spinlock_t hw_lock;
1881 u32 intr_mask;
1842 1882
1843 int pm_cap; 1883 int pm_cap;
1844 int msi; 1884 int msi;
@@ -1912,4 +1952,25 @@ static inline void gma_set_addr(struct sky2_hw *hw, unsigned port, unsigned reg,
1912 gma_write16(hw, port, reg+4,(u16) addr[2] | ((u16) addr[3] << 8)); 1952 gma_write16(hw, port, reg+4,(u16) addr[2] | ((u16) addr[3] << 8));
1913 gma_write16(hw, port, reg+8,(u16) addr[4] | ((u16) addr[5] << 8)); 1953 gma_write16(hw, port, reg+8,(u16) addr[4] | ((u16) addr[5] << 8));
1914} 1954}
1955
1956/* PCI config space access */
1957static inline u32 sky2_pci_read32(const struct sky2_hw *hw, unsigned reg)
1958{
1959 return sky2_read32(hw, Y2_CFG_SPC + reg);
1960}
1961
1962static inline u16 sky2_pci_read16(const struct sky2_hw *hw, unsigned reg)
1963{
1964 return sky2_read16(hw, Y2_CFG_SPC + reg);
1965}
1966
1967static inline void sky2_pci_write32(struct sky2_hw *hw, unsigned reg, u32 val)
1968{
1969 sky2_write32(hw, Y2_CFG_SPC + reg, val);
1970}
1971
1972static inline void sky2_pci_write16(struct sky2_hw *hw, unsigned reg, u16 val)
1973{
1974 sky2_write16(hw, Y2_CFG_SPC + reg, val);
1975}
1915#endif 1976#endif
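The inline helpers added above expose PCI configuration space as ordinary MMIO through the Y2_CFG_SPC window of the chip's register file, which is what lets the probe hunk earlier replace pci_read_config_dword() with sky2_pci_read32() under the new hw_lock. A small user-space model of the idea; the window offset, register array and bit value below are invented for the demo, not taken from the driver.

/*
 * Illustrative sketch, not part of this patch.  regs[] stands in for the
 * BAR mapping and DEMO_CFG_SPC for the config-space window offset.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define DEMO_CFG_SPC 0x1c00            /* assumed window offset */

static uint8_t regs[0x2000];           /* pretend memory-mapped registers */

static uint32_t demo_pci_read32(unsigned int reg)
{
	uint32_t val;

	/* In the driver: sky2_read32(hw, Y2_CFG_SPC + reg). */
	memcpy(&val, &regs[DEMO_CFG_SPC + reg], sizeof(val));
	return val;
}

static void demo_pci_write32(unsigned int reg, uint32_t val)
{
	memcpy(&regs[DEMO_CFG_SPC + reg], &val, sizeof(val));
}

int main(void)
{
	/* Read-modify-write of the REG2 mirror, as the big-endian
	 * descriptor-swap hunk does with PCI_REV_DESC. */
	demo_pci_write32(0x44, demo_pci_read32(0x44) | (1u << 2));
	printf("REG2 = 0x%08x\n", demo_pci_read32(0x44));
	return 0;
}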
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index e7dc653d5bd6..e8e92c853e89 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -9408,6 +9408,15 @@ static int __devinit tg3_is_sun_570X(struct tg3 *tp)
9408 return 0; 9408 return 0;
9409 if (venid == PCI_VENDOR_ID_SUN) 9409 if (venid == PCI_VENDOR_ID_SUN)
9410 return 1; 9410 return 1;
9411
9412 /* TG3 chips onboard the SunBlade-2500 don't have the
9413 * subsystem-vendor-id set to PCI_VENDOR_ID_SUN but they
9414 * are distinguishable from non-Sun variants by being
9415 * named "network" by the firmware. Non-Sun cards will
9416 * show up as being named "ethernet".
9417 */
9418 if (!strcmp(pcp->prom_name, "network"))
9419 return 1;
9411 } 9420 }
9412 return 0; 9421 return 0;
9413} 9422}
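The tg3 hunk above falls back to the firmware device name when the subsystem vendor ID is not Sun's. A stand-alone sketch of that test follows; 0x108e is PCI_VENDOR_ID_SUN, while the function name and the sample vendor value are invented for illustration.

/* Illustrative sketch, not part of this patch. */
#include <stdio.h>
#include <string.h>

static int looks_like_sun_onboard_tg3(unsigned int sub_vendor, const char *prom_name)
{
	if (sub_vendor == 0x108e)               /* PCI_VENDOR_ID_SUN */
		return 1;
	/* SunBlade-2500 onboard chips report a non-Sun subsystem vendor,
	 * but the firmware names them "network"; add-in cards appear as
	 * "ethernet". */
	return strcmp(prom_name, "network") == 0;
}

int main(void)
{
	printf("%d\n", looks_like_sun_onboard_tg3(0x14e4, "network"));  /* 1 */
	printf("%d\n", looks_like_sun_onboard_tg3(0x14e4, "ethernet")); /* 0 */
	return 0;
}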
diff --git a/drivers/net/tlan.c b/drivers/net/tlan.c
index c2506b56a186..12076f8f942c 100644
--- a/drivers/net/tlan.c
+++ b/drivers/net/tlan.c
@@ -536,6 +536,7 @@ static int __devinit TLan_probe1(struct pci_dev *pdev,
536 u16 device_id; 536 u16 device_id;
537 int reg, rc = -ENODEV; 537 int reg, rc = -ENODEV;
538 538
539#ifdef CONFIG_PCI
539 if (pdev) { 540 if (pdev) {
540 rc = pci_enable_device(pdev); 541 rc = pci_enable_device(pdev);
541 if (rc) 542 if (rc)
@@ -547,6 +548,7 @@ static int __devinit TLan_probe1(struct pci_dev *pdev,
547 goto err_out; 548 goto err_out;
548 } 549 }
549 } 550 }
551#endif /* CONFIG_PCI */
550 552
551 dev = alloc_etherdev(sizeof(TLanPrivateInfo)); 553 dev = alloc_etherdev(sizeof(TLanPrivateInfo));
552 if (dev == NULL) { 554 if (dev == NULL) {
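The TLan change above simply brackets the PCI-specific probe steps so the file still builds with CONFIG_PCI disabled, while non-PCI probing continues unconditionally. A minimal illustration of that guard pattern; all names below are placeholders.

/* Illustrative sketch, not part of this patch. */
#include <stdio.h>

/* #define CONFIG_PCI 1 */             /* normally selected by Kconfig */

static int demo_probe(void *pdev)
{
#ifdef CONFIG_PCI
	if (pdev) {
		/* pci_enable_device() / pci_request_regions() would run here. */
	}
#endif /* CONFIG_PCI */
	/* Non-PCI (EISA) probing continues regardless of the guard. */
	return 0;
}

int main(void)
{
	printf("%d\n", demo_probe(NULL));
	return 0;
}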
diff --git a/drivers/net/tokenring/smctr.h b/drivers/net/tokenring/smctr.h
index b306c7e4c793..88dfa2e01d6e 100644
--- a/drivers/net/tokenring/smctr.h
+++ b/drivers/net/tokenring/smctr.h
@@ -1042,7 +1042,7 @@ typedef struct net_local {
1042 __u16 functional_address[2]; 1042 __u16 functional_address[2];
1043 __u16 bitwise_group_address[2]; 1043 __u16 bitwise_group_address[2];
1044 1044
1045 __u8 *ptr_ucode; 1045 const __u8 *ptr_ucode;
1046 1046
1047 __u8 cleanup; 1047 __u8 cleanup;
1048 1048
diff --git a/drivers/net/wireless/ipw2200.c b/drivers/net/wireless/ipw2200.c
index 30b13a6e6fd9..ed37141319ea 100644
--- a/drivers/net/wireless/ipw2200.c
+++ b/drivers/net/wireless/ipw2200.c
@@ -4636,9 +4636,9 @@ static void ipw_rx_notification(struct ipw_priv *priv,
4636 } 4636 }
4637 4637
4638 default: 4638 default:
4639 IPW_ERROR("Unknown notification: " 4639 IPW_DEBUG_NOTIF("Unknown notification: "
4640 "subtype=%d,flags=0x%2x,size=%d\n", 4640 "subtype=%d,flags=0x%2x,size=%d\n",
4641 notif->subtype, notif->flags, notif->size); 4641 notif->subtype, notif->flags, notif->size);
4642 } 4642 }
4643} 4643}
4644 4644
diff --git a/drivers/net/wireless/wavelan_cs.c b/drivers/net/wireless/wavelan_cs.c
index cf373625fc70..98122f3a4bc2 100644
--- a/drivers/net/wireless/wavelan_cs.c
+++ b/drivers/net/wireless/wavelan_cs.c
@@ -950,16 +950,8 @@ wv_82593_cmd(struct net_device * dev,
950static inline int 950static inline int
951wv_diag(struct net_device * dev) 951wv_diag(struct net_device * dev)
952{ 952{
953 int ret = FALSE; 953 return(wv_82593_cmd(dev, "wv_diag(): diagnose",
954 954 OP0_DIAGNOSE, SR0_DIAGNOSE_PASSED));
955 if(wv_82593_cmd(dev, "wv_diag(): diagnose",
956 OP0_DIAGNOSE, SR0_DIAGNOSE_PASSED))
957 ret = TRUE;
958
959#ifdef DEBUG_CONFIG_ERRORS
960 printk(KERN_INFO "wavelan_cs: i82593 Self Test failed!\n");
961#endif
962 return(ret);
963} /* wv_diag */ 955} /* wv_diag */
964 956
965/*------------------------------------------------------------------*/ 957/*------------------------------------------------------------------*/
@@ -3604,8 +3596,8 @@ wv_82593_config(struct net_device * dev)
3604 cfblk.lin_prio = 0; /* conform to 802.3 backoff algorithm */ 3596 cfblk.lin_prio = 0; /* conform to 802.3 backoff algorithm */
3605 cfblk.exp_prio = 5; /* conform to 802.3 backoff algorithm */ 3597 cfblk.exp_prio = 5; /* conform to 802.3 backoff algorithm */
3606 cfblk.bof_met = 1; /* conform to 802.3 backoff algorithm */ 3598 cfblk.bof_met = 1; /* conform to 802.3 backoff algorithm */
3607 cfblk.ifrm_spc = 0x20; /* 32 bit times interframe spacing */ 3599 cfblk.ifrm_spc = 0x20 >> 4; /* 32 bit times interframe spacing */
3608 cfblk.slottim_low = 0x20; /* 32 bit times slot time */ 3600 cfblk.slottim_low = 0x20 >> 5; /* 32 bit times slot time */
3609 cfblk.slottim_hi = 0x0; 3601 cfblk.slottim_hi = 0x0;
3610 cfblk.max_retr = 15; 3602 cfblk.max_retr = 15;
3611 cfblk.prmisc = ((lp->promiscuous) ? TRUE: FALSE); /* Promiscuous mode */ 3603 cfblk.prmisc = ((lp->promiscuous) ? TRUE: FALSE); /* Promiscuous mode */