author		David S. Miller <davem@davemloft.net>	2012-01-10 17:53:49 -0500
committer	David S. Miller <davem@davemloft.net>	2012-01-10 17:53:49 -0500
commit		f3e438f0f776024aa3979e717a0f65f85fe9c6f3
tree		0a636cc5b427e171ab4a04fc889a3f1e6ad961ef /drivers/net
parent		e4e11180dfa545233e5145919b75b7fac88638df
parent		e92b9b3b091d5fcdaed91d6fa9410deae135704b
Merge branch 'davem-next.via-rhine' of git://violet.fr.zoreil.com/romieu/linux
Diffstat (limited to 'drivers/net')
 drivers/net/ethernet/via/via-rhine.c | 668
 1 file changed, 368 insertions(+), 300 deletions(-)
diff --git a/drivers/net/ethernet/via/via-rhine.c b/drivers/net/ethernet/via/via-rhine.c
index 5c4983b2870a..10b18eb63d25 100644
--- a/drivers/net/ethernet/via/via-rhine.c
+++ b/drivers/net/ethernet/via/via-rhine.c
@@ -39,10 +39,9 @@
 
 /* A few user-configurable values.
    These may be modified when a driver module is loaded. */
-
-#define DEBUG
-static int debug = 1;	/* 1 normal messages, 0 quiet .. 7 verbose. */
-static int max_interrupt_work = 20;
+static int debug = 0;
+#define RHINE_MSG_DEFAULT \
+	(0x0000)
 
 /* Set the copy breakpoint for the copy-only-tiny-frames scheme.
    Setting to > 1518 effectively disables this feature. */
@@ -128,12 +127,10 @@ MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
 MODULE_DESCRIPTION("VIA Rhine PCI Fast Ethernet driver");
 MODULE_LICENSE("GPL");
 
-module_param(max_interrupt_work, int, 0);
 module_param(debug, int, 0);
 module_param(rx_copybreak, int, 0);
 module_param(avoid_D3, bool, 0);
-MODULE_PARM_DESC(max_interrupt_work, "VIA Rhine maximum events handled per interrupt");
-MODULE_PARM_DESC(debug, "VIA Rhine debug level (0-7)");
+MODULE_PARM_DESC(debug, "VIA Rhine debug message flags");
 MODULE_PARM_DESC(rx_copybreak, "VIA Rhine copy breakpoint for copy-only-tiny-frames");
 MODULE_PARM_DESC(avoid_D3, "Avoid power state D3 (work-around for broken BIOSes)");
 
@@ -351,16 +348,25 @@ static const int mmio_verify_registers[] = {
 
 /* Bits in the interrupt status/mask registers. */
 enum intr_status_bits {
-	IntrRxDone=0x0001, IntrRxErr=0x0004, IntrRxEmpty=0x0020,
-	IntrTxDone=0x0002, IntrTxError=0x0008, IntrTxUnderrun=0x0210,
-	IntrPCIErr=0x0040,
-	IntrStatsMax=0x0080, IntrRxEarly=0x0100,
-	IntrRxOverflow=0x0400, IntrRxDropped=0x0800, IntrRxNoBuf=0x1000,
-	IntrTxAborted=0x2000, IntrLinkChange=0x4000,
-	IntrRxWakeUp=0x8000,
-	IntrNormalSummary=0x0003, IntrAbnormalSummary=0xC260,
-	IntrTxDescRace=0x080000,	/* mapped from IntrStatus2 */
-	IntrTxErrSummary=0x082218,
+	IntrRxDone	= 0x0001,
+	IntrTxDone	= 0x0002,
+	IntrRxErr	= 0x0004,
+	IntrTxError	= 0x0008,
+	IntrRxEmpty	= 0x0020,
+	IntrPCIErr	= 0x0040,
+	IntrStatsMax	= 0x0080,
+	IntrRxEarly	= 0x0100,
+	IntrTxUnderrun	= 0x0210,
+	IntrRxOverflow	= 0x0400,
+	IntrRxDropped	= 0x0800,
+	IntrRxNoBuf	= 0x1000,
+	IntrTxAborted	= 0x2000,
+	IntrLinkChange	= 0x4000,
+	IntrRxWakeUp	= 0x8000,
+	IntrTxDescRace	= 0x080000,	/* mapped from IntrStatus2 */
+	IntrNormalSummary	= IntrRxDone | IntrTxDone,
+	IntrTxErrSummary	= IntrTxDescRace | IntrTxAborted | IntrTxError |
+				  IntrTxUnderrun,
 };
 
 /* Bits in WOLcrSet/WOLcrClr and PwrcsrSet/PwrcsrClr */
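The rewritten enum spells the summary masks out of their component bits instead of magic numbers (the unused IntrAbnormalSummary is dropped). A standalone check that the named composites equal the old literals (harness is mine, not part of the patch):

#include <assert.h>
#include <stdio.h>

/* Values copied from enum intr_status_bits above. */
enum {
	IntrRxDone	= 0x0001,
	IntrTxDone	= 0x0002,
	IntrTxError	= 0x0008,
	IntrTxUnderrun	= 0x0210,
	IntrTxAborted	= 0x2000,
	IntrTxDescRace	= 0x080000,
};

int main(void)
{
	/* Old literal: IntrNormalSummary = 0x0003 */
	assert((IntrRxDone | IntrTxDone) == 0x0003);
	/* Old literal: IntrTxErrSummary = 0x082218 */
	assert((IntrTxDescRace | IntrTxAborted | IntrTxError |
		IntrTxUnderrun) == 0x082218);
	puts("composite masks match the old literals");
	return 0;
}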
@@ -439,8 +445,13 @@ struct rhine_private {
 	struct net_device *dev;
 	struct napi_struct napi;
 	spinlock_t lock;
+	struct mutex task_lock;
+	bool task_enable;
+	struct work_struct slow_event_task;
 	struct work_struct reset_task;
 
+	u32 msg_enable;
+
 	/* Frequently used values: keep some adjacent for cache effect. */
 	u32 quirks;
 	struct rx_desc *rx_head_desc;
@@ -476,41 +487,50 @@ static int mdio_read(struct net_device *dev, int phy_id, int location);
 static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
 static int rhine_open(struct net_device *dev);
 static void rhine_reset_task(struct work_struct *work);
+static void rhine_slow_event_task(struct work_struct *work);
 static void rhine_tx_timeout(struct net_device *dev);
 static netdev_tx_t rhine_start_tx(struct sk_buff *skb,
 				  struct net_device *dev);
 static irqreturn_t rhine_interrupt(int irq, void *dev_instance);
 static void rhine_tx(struct net_device *dev);
 static int rhine_rx(struct net_device *dev, int limit);
-static void rhine_error(struct net_device *dev, int intr_status);
 static void rhine_set_rx_mode(struct net_device *dev);
 static struct net_device_stats *rhine_get_stats(struct net_device *dev);
 static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
 static const struct ethtool_ops netdev_ethtool_ops;
 static int rhine_close(struct net_device *dev);
-static void rhine_shutdown (struct pci_dev *pdev);
 static int rhine_vlan_rx_add_vid(struct net_device *dev, unsigned short vid);
 static int rhine_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid);
-static void rhine_set_cam(void __iomem *ioaddr, int idx, u8 *addr);
-static void rhine_set_vlan_cam(void __iomem *ioaddr, int idx, u8 *addr);
-static void rhine_set_cam_mask(void __iomem *ioaddr, u32 mask);
-static void rhine_set_vlan_cam_mask(void __iomem *ioaddr, u32 mask);
-static void rhine_init_cam_filter(struct net_device *dev);
-static void rhine_update_vcam(struct net_device *dev);
+static void rhine_restart_tx(struct net_device *dev);
+
+static void rhine_wait_bit(struct rhine_private *rp, u8 reg, u8 mask, bool high)
+{
+	void __iomem *ioaddr = rp->base;
+	int i;
 
-#define RHINE_WAIT_FOR(condition)	\
-do {	\
-	int i = 1024;	\
-	while (!(condition) && --i)	\
-		;	\
-	if (debug > 1 && i < 512)	\
-		pr_info("%4d cycles used @ %s:%d\n",	\
-			1024 - i, __func__, __LINE__);	\
-} while (0)
-
-static inline u32 get_intr_status(struct net_device *dev)
+	for (i = 0; i < 1024; i++) {
+		if (high ^ !!(ioread8(ioaddr + reg) & mask))
+			break;
+		udelay(10);
+	}
+	if (i > 64) {
+		netif_dbg(rp, hw, rp->dev, "%s bit wait (%02x/%02x) cycle "
+			  "count: %04d\n", high ? "high" : "low", reg, mask, i);
+	}
+}
+
+static void rhine_wait_bit_high(struct rhine_private *rp, u8 reg, u8 mask)
+{
+	rhine_wait_bit(rp, reg, mask, true);
+}
+
+static void rhine_wait_bit_low(struct rhine_private *rp, u8 reg, u8 mask)
+{
+	rhine_wait_bit(rp, reg, mask, false);
+}
+
+static u32 rhine_get_events(struct rhine_private *rp)
 {
-	struct rhine_private *rp = netdev_priv(dev);
 	void __iomem *ioaddr = rp->base;
 	u32 intr_status;
 
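rhine_wait_bit() replaces the busy-spin RHINE_WAIT_FOR() macro with a bounded udelay() poll. Worth noting: the exit test `high ^ !!(val & mask)` is true when the sampled bit differs from the requested level, so as merged the loop returns as soon as the bit is at the opposite level of `high`; breaking on equality (`!(high ^ bit)`) would wait for the requested level. A truth-table sketch of the test exactly as written (standalone harness, mine):

#include <stdbool.h>
#include <stdio.h>

/* Exit predicate copied from rhine_wait_bit() above. */
static bool exits(bool high, unsigned char val, unsigned char mask)
{
	return high ^ !!(val & mask);
}

int main(void)
{
	printf("want high, bit=1 -> exit=%d\n", exits(true, 0x20, 0x20));
	printf("want high, bit=0 -> exit=%d\n", exits(true, 0x00, 0x20));
	printf("want low,  bit=1 -> exit=%d\n", exits(false, 0x20, 0x20));
	printf("want low,  bit=0 -> exit=%d\n", exits(false, 0x00, 0x20));
	return 0;
}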
@@ -521,6 +541,16 @@ static inline u32 get_intr_status(struct net_device *dev)
 	return intr_status;
 }
 
+static void rhine_ack_events(struct rhine_private *rp, u32 mask)
+{
+	void __iomem *ioaddr = rp->base;
+
+	if (rp->quirks & rqStatusWBRace)
+		iowrite8(mask >> 16, ioaddr + IntrStatus2);
+	iowrite16(mask, ioaddr + IntrStatus);
+	mmiowb();
+}
+
 /*
  * Get power related registers into sane state.
  * Notify user about past WOL event.
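rhine_ack_events() centralizes interrupt acknowledgment: the low 16 bits go to IntrStatus, and on rqStatusWBRace chips the bits above 16 are shifted down into the 8-bit IntrStatus2 register. That shift reproduces the old handler's hardcoded write of 0x08, since IntrTxDescRace lives at bit 19 (check is mine):

#include <assert.h>
#include <stdio.h>

int main(void)
{
	unsigned IntrTxDescRace = 0x080000;	/* mapped from IntrStatus2 */

	/* mask >> 16 routes the IntrStatus2-mapped bits to the 8-bit reg */
	assert((IntrTxDescRace >> 16) == 0x08);
	printf("IntrStatus2 ack byte: %#04x\n", IntrTxDescRace >> 16);
	return 0;
}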
@@ -585,6 +615,7 @@ static void rhine_chip_reset(struct net_device *dev)
 {
 	struct rhine_private *rp = netdev_priv(dev);
 	void __iomem *ioaddr = rp->base;
+	u8 cmd1;
 
 	iowrite8(Cmd1Reset, ioaddr + ChipCmd1);
 	IOSYNC;
@@ -597,13 +628,12 @@ static void rhine_chip_reset(struct net_device *dev)
 		iowrite8(0x40, ioaddr + MiscCmd);
 
 		/* Reset can take somewhat longer (rare) */
-		RHINE_WAIT_FOR(!(ioread8(ioaddr + ChipCmd1) & Cmd1Reset));
+		rhine_wait_bit_low(rp, ChipCmd1, Cmd1Reset);
 	}
 
-	if (debug > 1)
-		netdev_info(dev, "Reset %s\n",
-			    (ioread8(ioaddr + ChipCmd1) & Cmd1Reset) ?
-			    "failed" : "succeeded");
+	cmd1 = ioread8(ioaddr + ChipCmd1);
+	netif_info(rp, hw, dev, "Reset %s\n", (cmd1 & Cmd1Reset) ?
+		   "failed" : "succeeded");
 }
 
 #ifdef USE_MMIO
@@ -629,9 +659,15 @@ static void __devinit rhine_reload_eeprom(long pioaddr, struct net_device *dev)
 {
 	struct rhine_private *rp = netdev_priv(dev);
 	void __iomem *ioaddr = rp->base;
+	int i;
 
 	outb(0x20, pioaddr + MACRegEEcsr);
-	RHINE_WAIT_FOR(!(inb(pioaddr + MACRegEEcsr) & 0x20));
+	for (i = 0; i < 1024; i++) {
+		if (!(inb(pioaddr + MACRegEEcsr) & 0x20))
+			break;
+	}
+	if (i > 512)
+		pr_info("%4d cycles used @ %s:%d\n", i, __func__, __LINE__);
 
 #ifdef USE_MMIO
 	/*
@@ -657,23 +693,127 @@ static void rhine_poll(struct net_device *dev)
 }
 #endif
 
+static void rhine_kick_tx_threshold(struct rhine_private *rp)
+{
+	if (rp->tx_thresh < 0xe0) {
+		void __iomem *ioaddr = rp->base;
+
+		rp->tx_thresh += 0x20;
+		BYTE_REG_BITS_SET(rp->tx_thresh, 0x80, ioaddr + TxConfig);
+	}
+}
+
+static void rhine_tx_err(struct rhine_private *rp, u32 status)
+{
+	struct net_device *dev = rp->dev;
+
+	if (status & IntrTxAborted) {
+		netif_info(rp, tx_err, dev,
+			   "Abort %08x, frame dropped\n", status);
+	}
+
+	if (status & IntrTxUnderrun) {
+		rhine_kick_tx_threshold(rp);
+		netif_info(rp, tx_err, dev, "Transmitter underrun, "
+			   "Tx threshold now %02x\n", rp->tx_thresh);
+	}
+
+	if (status & IntrTxDescRace)
+		netif_info(rp, tx_err, dev, "Tx descriptor write-back race\n");
+
+	if ((status & IntrTxError) &&
+	    (status & (IntrTxAborted | IntrTxUnderrun | IntrTxDescRace)) == 0) {
+		rhine_kick_tx_threshold(rp);
+		netif_info(rp, tx_err, dev, "Unspecified error. "
+			   "Tx threshold now %02x\n", rp->tx_thresh);
+	}
+
+	rhine_restart_tx(dev);
+}
+
+static void rhine_update_rx_crc_and_missed_errord(struct rhine_private *rp)
+{
+	void __iomem *ioaddr = rp->base;
+	struct net_device_stats *stats = &rp->dev->stats;
+
+	stats->rx_crc_errors    += ioread16(ioaddr + RxCRCErrs);
+	stats->rx_missed_errors += ioread16(ioaddr + RxMissed);
+
+	/*
+	 * Clears the "tally counters" for CRC errors and missed frames(?).
+	 * It has been reported that some chips need a write of 0 to clear
+	 * these, for others the counters are set to 1 when written to and
+	 * instead cleared when read. So we clear them both ways ...
+	 */
+	iowrite32(0, ioaddr + RxMissed);
+	ioread16(ioaddr + RxCRCErrs);
+	ioread16(ioaddr + RxMissed);
+}
+
+#define RHINE_EVENT_NAPI_RX	(IntrRxDone | \
+				 IntrRxErr | \
+				 IntrRxEmpty | \
+				 IntrRxOverflow | \
+				 IntrRxDropped | \
+				 IntrRxNoBuf | \
+				 IntrRxWakeUp)
+
+#define RHINE_EVENT_NAPI_TX_ERR	(IntrTxError | \
+				 IntrTxAborted | \
+				 IntrTxUnderrun | \
+				 IntrTxDescRace)
+#define RHINE_EVENT_NAPI_TX	(IntrTxDone | RHINE_EVENT_NAPI_TX_ERR)
+
+#define RHINE_EVENT_NAPI	(RHINE_EVENT_NAPI_RX | \
+				 RHINE_EVENT_NAPI_TX | \
+				 IntrStatsMax)
+#define RHINE_EVENT_SLOW	(IntrPCIErr | IntrLinkChange)
+#define RHINE_EVENT		(RHINE_EVENT_NAPI | RHINE_EVENT_SLOW)
+
 static int rhine_napipoll(struct napi_struct *napi, int budget)
 {
 	struct rhine_private *rp = container_of(napi, struct rhine_private, napi);
 	struct net_device *dev = rp->dev;
 	void __iomem *ioaddr = rp->base;
-	int work_done;
+	u16 enable_mask = RHINE_EVENT & 0xffff;
+	int work_done = 0;
+	u32 status;
+
+	status = rhine_get_events(rp);
+	rhine_ack_events(rp, status & ~RHINE_EVENT_SLOW);
+
+	if (status & RHINE_EVENT_NAPI_RX)
+		work_done += rhine_rx(dev, budget);
+
+	if (status & RHINE_EVENT_NAPI_TX) {
+		if (status & RHINE_EVENT_NAPI_TX_ERR) {
+			/* Avoid scavenging before Tx engine turned off */
+			rhine_wait_bit_low(rp, ChipCmd, CmdTxOn);
+			if (ioread8(ioaddr + ChipCmd) & CmdTxOn)
+				netif_warn(rp, tx_err, dev, "Tx still on\n");
+		}
 
-	work_done = rhine_rx(dev, budget);
+		rhine_tx(dev);
+
+		if (status & RHINE_EVENT_NAPI_TX_ERR)
+			rhine_tx_err(rp, status);
+	}
+
+	if (status & IntrStatsMax) {
+		spin_lock(&rp->lock);
+		rhine_update_rx_crc_and_missed_errord(rp);
+		spin_unlock(&rp->lock);
+	}
+
+	if (status & RHINE_EVENT_SLOW) {
+		enable_mask &= ~RHINE_EVENT_SLOW;
+		schedule_work(&rp->slow_event_task);
+	}
 
 	if (work_done < budget) {
 		napi_complete(napi);
-
-		iowrite16(IntrRxDone | IntrRxErr | IntrRxEmpty| IntrRxOverflow |
-			  IntrRxDropped | IntrRxNoBuf | IntrTxAborted |
-			  IntrTxDone | IntrTxError | IntrTxUnderrun |
-			  IntrPCIErr | IntrStatsMax | IntrLinkChange,
-			  ioaddr + IntrEnable);
+		iowrite16(enable_mask, ioaddr + IntrEnable);
+		mmiowb();
 	}
 	return work_done;
 }
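rhine_napipoll() now handles every event class: the hard interrupt handler just masks the chip and schedules NAPI, and poll only re-enables interrupts after a round that came in under budget (slow events stay masked and go to the workqueue). A toy model of that budget/complete/unmask contract (standalone simulation, all names mine):

#include <stdio.h>

#define BUDGET 16

static int rx_pending = 40;	/* pretend 40 frames are queued */
static int irq_enabled;

static int poll(int budget)
{
	int work_done = 0;

	while (work_done < budget && rx_pending > 0) {
		rx_pending--;		/* "process" one frame */
		work_done++;
	}
	if (work_done < budget)
		irq_enabled = 1;	/* drained: unmask, stop polling */
	return work_done;
}

int main(void)
{
	int round = 0;

	irq_enabled = 0;		/* the irq handler masked the chip */
	while (!irq_enabled)
		printf("poll round %d: %d frames\n", ++round, poll(BUDGET));
	return 0;
}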
@@ -797,6 +937,7 @@ static int __devinit rhine_init_one(struct pci_dev *pdev,
 	rp->quirks = quirks;
 	rp->pioaddr = pioaddr;
 	rp->pdev = pdev;
+	rp->msg_enable = netif_msg_init(debug, RHINE_MSG_DEFAULT);
 
 	rc = pci_request_regions(pdev, DRV_NAME);
 	if (rc)
@@ -856,7 +997,9 @@ static int __devinit rhine_init_one(struct pci_dev *pdev,
 	dev->irq = pdev->irq;
 
 	spin_lock_init(&rp->lock);
+	mutex_init(&rp->task_lock);
 	INIT_WORK(&rp->reset_task, rhine_reset_task);
+	INIT_WORK(&rp->slow_event_task, rhine_slow_event_task);
 
 	rp->mii_if.dev = dev;
 	rp->mii_if.mdio_read = mdio_read;
@@ -916,8 +1059,8 @@ static int __devinit rhine_init_one(struct pci_dev *pdev,
 		}
 	}
 	rp->mii_if.phy_id = phy_id;
-	if (debug > 1 && avoid_D3)
-		netdev_info(dev, "No D3 power state at shutdown\n");
+	if (avoid_D3)
+		netif_info(rp, probe, dev, "No D3 power state at shutdown\n");
 
 	return 0;
 
@@ -1093,7 +1236,7 @@ static void rhine_check_media(struct net_device *dev, unsigned int init_media)
 	struct rhine_private *rp = netdev_priv(dev);
 	void __iomem *ioaddr = rp->base;
 
-	mii_check_media(&rp->mii_if, debug, init_media);
+	mii_check_media(&rp->mii_if, netif_msg_link(rp), init_media);
 
 	if (rp->mii_if.full_duplex)
 	    iowrite8(ioread8(ioaddr + ChipCmd1) | Cmd1FDuplex,
@@ -1101,24 +1244,26 @@ static void rhine_check_media(struct net_device *dev, unsigned int init_media)
 	else
 	    iowrite8(ioread8(ioaddr + ChipCmd1) & ~Cmd1FDuplex,
 		   ioaddr + ChipCmd1);
-	if (debug > 1)
-		netdev_info(dev, "force_media %d, carrier %d\n",
-			    rp->mii_if.force_media, netif_carrier_ok(dev));
+
+	netif_info(rp, link, dev, "force_media %d, carrier %d\n",
+		   rp->mii_if.force_media, netif_carrier_ok(dev));
 }
 
 /* Called after status of force_media possibly changed */
 static void rhine_set_carrier(struct mii_if_info *mii)
 {
+	struct net_device *dev = mii->dev;
+	struct rhine_private *rp = netdev_priv(dev);
+
 	if (mii->force_media) {
 		/* autoneg is off: Link is always assumed to be up */
-		if (!netif_carrier_ok(mii->dev))
-			netif_carrier_on(mii->dev);
-	}
-	else	/* Let MMI library update carrier status */
-		rhine_check_media(mii->dev, 0);
-	if (debug > 1)
-		netdev_info(mii->dev, "force_media %d, carrier %d\n",
-			    mii->force_media, netif_carrier_ok(mii->dev));
+		if (!netif_carrier_ok(dev))
+			netif_carrier_on(dev);
+	} else	/* Let MMI library update carrier status */
+		rhine_check_media(dev, 0);
+
+	netif_info(rp, link, dev, "force_media %d, carrier %d\n",
+		   mii->force_media, netif_carrier_ok(dev));
 }
 
 /**
@@ -1266,10 +1411,10 @@ static int rhine_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
 {
 	struct rhine_private *rp = netdev_priv(dev);
 
-	spin_lock_irq(&rp->lock);
+	spin_lock_bh(&rp->lock);
 	set_bit(vid, rp->active_vlans);
 	rhine_update_vcam(dev);
-	spin_unlock_irq(&rp->lock);
+	spin_unlock_bh(&rp->lock);
 	return 0;
 }
 
@@ -1277,10 +1422,10 @@ static int rhine_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
 {
 	struct rhine_private *rp = netdev_priv(dev);
 
-	spin_lock_irq(&rp->lock);
+	spin_lock_bh(&rp->lock);
 	clear_bit(vid, rp->active_vlans);
 	rhine_update_vcam(dev);
-	spin_unlock_irq(&rp->lock);
+	spin_unlock_bh(&rp->lock);
 	return 0;
 }
 
@@ -1310,12 +1455,7 @@ static void init_registers(struct net_device *dev)
 
 	napi_enable(&rp->napi);
 
-	/* Enable interrupts by setting the interrupt mask. */
-	iowrite16(IntrRxDone | IntrRxErr | IntrRxEmpty| IntrRxOverflow |
-		  IntrRxDropped | IntrRxNoBuf | IntrTxAborted |
-		  IntrTxDone | IntrTxError | IntrTxUnderrun |
-		  IntrPCIErr | IntrStatsMax | IntrLinkChange,
-		  ioaddr + IntrEnable);
+	iowrite16(RHINE_EVENT & 0xffff, ioaddr + IntrEnable);
 
 	iowrite16(CmdStart | CmdTxOn | CmdRxOn | (Cmd1NoTxPoll << 8),
 	       ioaddr + ChipCmd);
@@ -1323,23 +1463,27 @@ static void init_registers(struct net_device *dev)
 }
 
 /* Enable MII link status auto-polling (required for IntrLinkChange) */
-static void rhine_enable_linkmon(void __iomem *ioaddr)
+static void rhine_enable_linkmon(struct rhine_private *rp)
 {
+	void __iomem *ioaddr = rp->base;
+
 	iowrite8(0, ioaddr + MIICmd);
 	iowrite8(MII_BMSR, ioaddr + MIIRegAddr);
 	iowrite8(0x80, ioaddr + MIICmd);
 
-	RHINE_WAIT_FOR((ioread8(ioaddr + MIIRegAddr) & 0x20));
+	rhine_wait_bit_high(rp, MIIRegAddr, 0x20);
 
 	iowrite8(MII_BMSR | 0x40, ioaddr + MIIRegAddr);
 }
 
 /* Disable MII link status auto-polling (required for MDIO access) */
-static void rhine_disable_linkmon(void __iomem *ioaddr, u32 quirks)
+static void rhine_disable_linkmon(struct rhine_private *rp)
 {
+	void __iomem *ioaddr = rp->base;
+
 	iowrite8(0, ioaddr + MIICmd);
 
-	if (quirks & rqRhineI) {
+	if (rp->quirks & rqRhineI) {
 		iowrite8(0x01, ioaddr + MIIRegAddr);	// MII_BMSR
 
 		/* Can be called from ISR. Evil. */
@@ -1348,13 +1492,13 @@ static void rhine_disable_linkmon(void __iomem *ioaddr, u32 quirks)
 		/* 0x80 must be set immediately before turning it off */
 		iowrite8(0x80, ioaddr + MIICmd);
 
-		RHINE_WAIT_FOR(ioread8(ioaddr + MIIRegAddr) & 0x20);
+		rhine_wait_bit_high(rp, MIIRegAddr, 0x20);
 
 		/* Heh. Now clear 0x80 again. */
 		iowrite8(0, ioaddr + MIICmd);
 	}
 	else
-		RHINE_WAIT_FOR(ioread8(ioaddr + MIIRegAddr) & 0x80);
+		rhine_wait_bit_high(rp, MIIRegAddr, 0x80);
 }
 
 /* Read and write over the MII Management Data I/O (MDIO) interface. */
@@ -1365,16 +1509,16 @@ static int mdio_read(struct net_device *dev, int phy_id, int regnum)
 	void __iomem *ioaddr = rp->base;
 	int result;
 
-	rhine_disable_linkmon(ioaddr, rp->quirks);
+	rhine_disable_linkmon(rp);
 
 	/* rhine_disable_linkmon already cleared MIICmd */
 	iowrite8(phy_id, ioaddr + MIIPhyAddr);
 	iowrite8(regnum, ioaddr + MIIRegAddr);
 	iowrite8(0x40, ioaddr + MIICmd);		/* Trigger read */
-	RHINE_WAIT_FOR(!(ioread8(ioaddr + MIICmd) & 0x40));
+	rhine_wait_bit_low(rp, MIICmd, 0x40);
 	result = ioread16(ioaddr + MIIData);
 
-	rhine_enable_linkmon(ioaddr);
+	rhine_enable_linkmon(rp);
 	return result;
 }
 
@@ -1383,16 +1527,33 @@ static void mdio_write(struct net_device *dev, int phy_id, int regnum, int value
 	struct rhine_private *rp = netdev_priv(dev);
 	void __iomem *ioaddr = rp->base;
 
-	rhine_disable_linkmon(ioaddr, rp->quirks);
+	rhine_disable_linkmon(rp);
 
 	/* rhine_disable_linkmon already cleared MIICmd */
 	iowrite8(phy_id, ioaddr + MIIPhyAddr);
 	iowrite8(regnum, ioaddr + MIIRegAddr);
 	iowrite16(value, ioaddr + MIIData);
 	iowrite8(0x20, ioaddr + MIICmd);		/* Trigger write */
-	RHINE_WAIT_FOR(!(ioread8(ioaddr + MIICmd) & 0x20));
+	rhine_wait_bit_low(rp, MIICmd, 0x20);
 
-	rhine_enable_linkmon(ioaddr);
+	rhine_enable_linkmon(rp);
+}
+
+static void rhine_task_disable(struct rhine_private *rp)
+{
+	mutex_lock(&rp->task_lock);
+	rp->task_enable = false;
+	mutex_unlock(&rp->task_lock);
+
+	cancel_work_sync(&rp->slow_event_task);
+	cancel_work_sync(&rp->reset_task);
+}
+
+static void rhine_task_enable(struct rhine_private *rp)
+{
+	mutex_lock(&rp->task_lock);
+	rp->task_enable = true;
+	mutex_unlock(&rp->task_lock);
 }
 
 static int rhine_open(struct net_device *dev)
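rhine_task_disable()/rhine_task_enable() implement the gate that lets rhine_close() and rhine_suspend() shut the work items out: the flag is flipped under task_lock, each task rechecks it under the same lock before touching hardware, and cancel_work_sync() reaps anything already queued. A minimal pthread model of the pattern (mine, not kernel code):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t task_lock = PTHREAD_MUTEX_INITIALIZER;
static bool task_enable;

static void slow_event_task(void)
{
	pthread_mutex_lock(&task_lock);
	if (!task_enable)
		goto out_unlock;	/* device going down: do nothing */
	puts("handling slow event");
out_unlock:
	pthread_mutex_unlock(&task_lock);
}

static void task_disable(void)
{
	pthread_mutex_lock(&task_lock);
	task_enable = false;
	pthread_mutex_unlock(&task_lock);
	/* the cancel_work_sync() calls would follow here */
}

int main(void)
{
	task_enable = true;
	slow_event_task();	/* runs */
	task_disable();
	slow_event_task();	/* bails out under the lock */
	return 0;
}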
@@ -1406,8 +1567,7 @@ static int rhine_open(struct net_device *dev)
 	if (rc)
 		return rc;
 
-	if (debug > 1)
-		netdev_dbg(dev, "%s() irq %d\n", __func__, rp->pdev->irq);
+	netif_dbg(rp, ifup, dev, "%s() irq %d\n", __func__, rp->pdev->irq);
 
 	rc = alloc_ring(dev);
 	if (rc) {
@@ -1417,11 +1577,12 @@ static int rhine_open(struct net_device *dev)
 	alloc_rbufs(dev);
 	alloc_tbufs(dev);
 	rhine_chip_reset(dev);
+	rhine_task_enable(rp);
 	init_registers(dev);
-	if (debug > 2)
-		netdev_dbg(dev, "%s() Done - status %04x MII status: %04x\n",
-			   __func__, ioread16(ioaddr + ChipCmd),
-			   mdio_read(dev, rp->mii_if.phy_id, MII_BMSR));
+
+	netif_dbg(rp, ifup, dev, "%s() Done - status %04x MII status: %04x\n",
+		  __func__, ioread16(ioaddr + ChipCmd),
+		  mdio_read(dev, rp->mii_if.phy_id, MII_BMSR));
 
 	netif_start_queue(dev);
 
@@ -1434,11 +1595,12 @@ static void rhine_reset_task(struct work_struct *work)
 				reset_task);
 	struct net_device *dev = rp->dev;
 
-	/* protect against concurrent rx interrupts */
-	disable_irq(rp->pdev->irq);
+	mutex_lock(&rp->task_lock);
 
-	napi_disable(&rp->napi);
+	if (!rp->task_enable)
+		goto out_unlock;
 
+	napi_disable(&rp->napi);
 	spin_lock_bh(&rp->lock);
 
 	/* clear all descriptors */
@@ -1452,11 +1614,13 @@ static void rhine_reset_task(struct work_struct *work)
 	init_registers(dev);
 
 	spin_unlock_bh(&rp->lock);
-	enable_irq(rp->pdev->irq);
 
 	dev->trans_start = jiffies; /* prevent tx timeout */
 	dev->stats.tx_errors++;
 	netif_wake_queue(dev);
+
+out_unlock:
+	mutex_unlock(&rp->task_lock);
 }
 
 static void rhine_tx_timeout(struct net_device *dev)
@@ -1477,7 +1641,6 @@ static netdev_tx_t rhine_start_tx(struct sk_buff *skb,
 	struct rhine_private *rp = netdev_priv(dev);
 	void __iomem *ioaddr = rp->base;
 	unsigned entry;
-	unsigned long flags;
 
 	/* Caution: the write order is important here, set the field
 	   with the "ownership" bits last. */
@@ -1529,7 +1692,6 @@ static netdev_tx_t rhine_start_tx(struct sk_buff *skb,
 	rp->tx_ring[entry].tx_status = 0;
 
 	/* lock eth irq */
-	spin_lock_irqsave(&rp->lock, flags);
 	wmb();
 	rp->tx_ring[entry].tx_status |= cpu_to_le32(DescOwn);
 	wmb();
@@ -1550,78 +1712,43 @@ static netdev_tx_t rhine_start_tx(struct sk_buff *skb,
 	if (rp->cur_tx == rp->dirty_tx + TX_QUEUE_LEN)
 		netif_stop_queue(dev);
 
-	spin_unlock_irqrestore(&rp->lock, flags);
+	netif_dbg(rp, tx_queued, dev, "Transmit frame #%d queued in slot %d\n",
+		  rp->cur_tx - 1, entry);
 
-	if (debug > 4) {
-		netdev_dbg(dev, "Transmit frame #%d queued in slot %d\n",
-			   rp->cur_tx-1, entry);
-	}
 	return NETDEV_TX_OK;
 }
 
+static void rhine_irq_disable(struct rhine_private *rp)
+{
+	iowrite16(0x0000, rp->base + IntrEnable);
+	mmiowb();
+}
+
 /* The interrupt handler does all of the Rx thread work and cleans up
    after the Tx thread. */
 static irqreturn_t rhine_interrupt(int irq, void *dev_instance)
 {
 	struct net_device *dev = dev_instance;
 	struct rhine_private *rp = netdev_priv(dev);
-	void __iomem *ioaddr = rp->base;
-	u32 intr_status;
-	int boguscnt = max_interrupt_work;
+	u32 status;
 	int handled = 0;
 
-	while ((intr_status = get_intr_status(dev))) {
-		handled = 1;
-
-		/* Acknowledge all of the current interrupt sources ASAP. */
-		if (intr_status & IntrTxDescRace)
-			iowrite8(0x08, ioaddr + IntrStatus2);
-		iowrite16(intr_status & 0xffff, ioaddr + IntrStatus);
-		IOSYNC;
+	status = rhine_get_events(rp);
 
-		if (debug > 4)
-			netdev_dbg(dev, "Interrupt, status %08x\n",
-				   intr_status);
-
-		if (intr_status & (IntrRxDone | IntrRxErr | IntrRxDropped |
-				   IntrRxWakeUp | IntrRxEmpty | IntrRxNoBuf)) {
-			iowrite16(IntrTxAborted |
-				  IntrTxDone | IntrTxError | IntrTxUnderrun |
-				  IntrPCIErr | IntrStatsMax | IntrLinkChange,
-				  ioaddr + IntrEnable);
-
-			napi_schedule(&rp->napi);
-		}
+	netif_dbg(rp, intr, dev, "Interrupt, status %08x\n", status);
 
-		if (intr_status & (IntrTxErrSummary | IntrTxDone)) {
-			if (intr_status & IntrTxErrSummary) {
-				/* Avoid scavenging before Tx engine turned off */
-				RHINE_WAIT_FOR(!(ioread8(ioaddr+ChipCmd) & CmdTxOn));
-				if (debug > 2 &&
-				    ioread8(ioaddr+ChipCmd) & CmdTxOn)
-					netdev_warn(dev,
-						    "%s: Tx engine still on\n",
-						    __func__);
-			}
-			rhine_tx(dev);
-		}
+	if (status & RHINE_EVENT) {
+		handled = 1;
 
-		/* Abnormal error summary/uncommon events handlers. */
-		if (intr_status & (IntrPCIErr | IntrLinkChange |
-				   IntrStatsMax | IntrTxError | IntrTxAborted |
-				   IntrTxUnderrun | IntrTxDescRace))
-			rhine_error(dev, intr_status);
+		rhine_irq_disable(rp);
+		napi_schedule(&rp->napi);
+	}
 
-		if (--boguscnt < 0) {
-			netdev_warn(dev, "Too much work at interrupt, status=%#08x\n",
-				    intr_status);
-			break;
-		}
+	if (status & ~(IntrLinkChange | IntrStatsMax | RHINE_EVENT_NAPI)) {
+		netif_err(rp, intr, dev, "Something Wicked happened! %08x\n",
+			  status);
 	}
 
-	if (debug > 3)
-		netdev_dbg(dev, "exiting interrupt, status=%08x\n",
-			   ioread16(ioaddr + IntrStatus));
 	return IRQ_RETVAL(handled);
 }
 
@@ -1632,20 +1759,16 @@ static void rhine_tx(struct net_device *dev)
 	struct rhine_private *rp = netdev_priv(dev);
 	int txstatus = 0, entry = rp->dirty_tx % TX_RING_SIZE;
 
-	spin_lock(&rp->lock);
-
 	/* find and cleanup dirty tx descriptors */
 	while (rp->dirty_tx != rp->cur_tx) {
 		txstatus = le32_to_cpu(rp->tx_ring[entry].tx_status);
-		if (debug > 6)
-			netdev_dbg(dev, "Tx scavenge %d status %08x\n",
-				   entry, txstatus);
+		netif_dbg(rp, tx_done, dev, "Tx scavenge %d status %08x\n",
+			  entry, txstatus);
 		if (txstatus & DescOwn)
 			break;
 		if (txstatus & 0x8000) {
-			if (debug > 1)
-				netdev_dbg(dev, "Transmit error, Tx status %08x\n",
-					   txstatus);
+			netif_dbg(rp, tx_done, dev,
+				  "Transmit error, Tx status %08x\n", txstatus);
 			dev->stats.tx_errors++;
 			if (txstatus & 0x0400)
 				dev->stats.tx_carrier_errors++;
@@ -1667,10 +1790,8 @@ static void rhine_tx(struct net_device *dev)
 			dev->stats.collisions += (txstatus >> 3) & 0x0F;
 		else
 			dev->stats.collisions += txstatus & 0x0F;
-		if (debug > 6)
-			netdev_dbg(dev, "collisions: %1.1x:%1.1x\n",
-				   (txstatus >> 3) & 0xF,
-				   txstatus & 0xF);
+		netif_dbg(rp, tx_done, dev, "collisions: %1.1x:%1.1x\n",
+			  (txstatus >> 3) & 0xF, txstatus & 0xF);
 		dev->stats.tx_bytes += rp->tx_skbuff[entry]->len;
 		dev->stats.tx_packets++;
 	}
@@ -1687,8 +1808,6 @@ static void rhine_tx(struct net_device *dev)
 	}
 	if ((rp->cur_tx - rp->dirty_tx) < TX_QUEUE_LEN - 4)
 		netif_wake_queue(dev);
-
-	spin_unlock(&rp->lock);
 }
 
 /**
@@ -1713,11 +1832,8 @@ static int rhine_rx(struct net_device *dev, int limit)
 	int count;
 	int entry = rp->cur_rx % RX_RING_SIZE;
 
-	if (debug > 4) {
-		netdev_dbg(dev, "%s(), entry %d status %08x\n",
-			   __func__, entry,
-			   le32_to_cpu(rp->rx_head_desc->rx_status));
-	}
+	netif_dbg(rp, rx_status, dev, "%s(), entry %d status %08x\n", __func__,
+		  entry, le32_to_cpu(rp->rx_head_desc->rx_status));
 
 	/* If EOP is set on the next entry, it's a new packet. Send it up. */
 	for (count = 0; count < limit; ++count) {
@@ -1729,9 +1845,8 @@ static int rhine_rx(struct net_device *dev, int limit)
 		if (desc_status & DescOwn)
 			break;
 
-		if (debug > 4)
-			netdev_dbg(dev, "%s() status is %08x\n",
-				   __func__, desc_status);
+		netif_dbg(rp, rx_status, dev, "%s() status %08x\n", __func__,
+			  desc_status);
 
 		if ((desc_status & (RxWholePkt | RxErr)) != RxWholePkt) {
 			if ((desc_status & RxWholePkt) != RxWholePkt) {
@@ -1747,9 +1862,9 @@ static int rhine_rx(struct net_device *dev, int limit)
 				dev->stats.rx_length_errors++;
 			} else if (desc_status & RxErr) {
 				/* There was a error. */
-				if (debug > 2)
-					netdev_dbg(dev, "%s() Rx error was %08x\n",
-						   __func__, desc_status);
+				netif_dbg(rp, rx_err, dev,
+					  "%s() Rx error %08x\n", __func__,
+					  desc_status);
 				dev->stats.rx_errors++;
 				if (desc_status & 0x0030)
 					dev->stats.rx_length_errors++;
@@ -1839,19 +1954,6 @@ static int rhine_rx(struct net_device *dev, int limit)
 	return count;
 }
 
-/*
- * Clears the "tally counters" for CRC errors and missed frames(?).
- * It has been reported that some chips need a write of 0 to clear
- * these, for others the counters are set to 1 when written to and
- * instead cleared when read. So we clear them both ways ...
- */
-static inline void clear_tally_counters(void __iomem *ioaddr)
-{
-	iowrite32(0, ioaddr + RxMissed);
-	ioread16(ioaddr + RxCRCErrs);
-	ioread16(ioaddr + RxMissed);
-}
-
 static void rhine_restart_tx(struct net_device *dev) {
 	struct rhine_private *rp = netdev_priv(dev);
 	void __iomem *ioaddr = rp->base;
@@ -1862,7 +1964,7 @@ static void rhine_restart_tx(struct net_device *dev) {
 	 * If new errors occurred, we need to sort them out before doing Tx.
 	 * In that case the ISR will be back here RSN anyway.
 	 */
-	intr_status = get_intr_status(dev);
+	intr_status = rhine_get_events(rp);
 
 	if ((intr_status & IntrTxErrSummary) == 0) {
 
@@ -1883,79 +1985,50 @@ static void rhine_restart_tx(struct net_device *dev) {
 	}
 	else {
 		/* This should never happen */
-		if (debug > 1)
-			netdev_warn(dev, "%s() Another error occurred %08x\n",
-				    __func__, intr_status);
+		netif_warn(rp, tx_err, dev, "another error occurred %08x\n",
+			   intr_status);
 	}
 
 }
 
-static void rhine_error(struct net_device *dev, int intr_status)
+static void rhine_slow_event_task(struct work_struct *work)
 {
-	struct rhine_private *rp = netdev_priv(dev);
-	void __iomem *ioaddr = rp->base;
+	struct rhine_private *rp =
+		container_of(work, struct rhine_private, slow_event_task);
+	struct net_device *dev = rp->dev;
+	u32 intr_status;
 
-	spin_lock(&rp->lock);
+	mutex_lock(&rp->task_lock);
+
+	if (!rp->task_enable)
+		goto out_unlock;
+
+	intr_status = rhine_get_events(rp);
+	rhine_ack_events(rp, intr_status & RHINE_EVENT_SLOW);
 
 	if (intr_status & IntrLinkChange)
 		rhine_check_media(dev, 0);
-	if (intr_status & IntrStatsMax) {
-		dev->stats.rx_crc_errors += ioread16(ioaddr + RxCRCErrs);
-		dev->stats.rx_missed_errors += ioread16(ioaddr + RxMissed);
-		clear_tally_counters(ioaddr);
-	}
-	if (intr_status & IntrTxAborted) {
-		if (debug > 1)
-			netdev_info(dev, "Abort %08x, frame dropped\n",
-				    intr_status);
-	}
-	if (intr_status & IntrTxUnderrun) {
-		if (rp->tx_thresh < 0xE0)
-			BYTE_REG_BITS_SET((rp->tx_thresh += 0x20), 0x80, ioaddr + TxConfig);
-		if (debug > 1)
-			netdev_info(dev, "Transmitter underrun, Tx threshold now %02x\n",
-				    rp->tx_thresh);
-	}
-	if (intr_status & IntrTxDescRace) {
-		if (debug > 2)
-			netdev_info(dev, "Tx descriptor write-back race\n");
-	}
-	if ((intr_status & IntrTxError) &&
-	    (intr_status & (IntrTxAborted |
-	     IntrTxUnderrun | IntrTxDescRace)) == 0) {
-		if (rp->tx_thresh < 0xE0) {
-			BYTE_REG_BITS_SET((rp->tx_thresh += 0x20), 0x80, ioaddr + TxConfig);
-		}
-		if (debug > 1)
-			netdev_info(dev, "Unspecified error. Tx threshold now %02x\n",
-				    rp->tx_thresh);
-	}
-	if (intr_status & (IntrTxAborted | IntrTxUnderrun | IntrTxDescRace |
-			   IntrTxError))
-		rhine_restart_tx(dev);
-
-	if (intr_status & ~(IntrLinkChange | IntrStatsMax | IntrTxUnderrun |
-			    IntrTxError | IntrTxAborted | IntrNormalSummary |
-			    IntrTxDescRace)) {
-		if (debug > 1)
-			netdev_err(dev, "Something Wicked happened! %08x\n",
-				   intr_status);
-	}
 
-	spin_unlock(&rp->lock);
+	if (intr_status & IntrPCIErr)
+		netif_warn(rp, hw, dev, "PCI error\n");
+
+	napi_disable(&rp->napi);
+	rhine_irq_disable(rp);
+	/* Slow and safe. Consider __napi_schedule as a replacement ? */
+	napi_enable(&rp->napi);
+	napi_schedule(&rp->napi);
+
+out_unlock:
+	mutex_unlock(&rp->task_lock);
 }
 
 static struct net_device_stats *rhine_get_stats(struct net_device *dev)
 {
 	struct rhine_private *rp = netdev_priv(dev);
-	void __iomem *ioaddr = rp->base;
-	unsigned long flags;
 
-	spin_lock_irqsave(&rp->lock, flags);
-	dev->stats.rx_crc_errors += ioread16(ioaddr + RxCRCErrs);
-	dev->stats.rx_missed_errors += ioread16(ioaddr + RxMissed);
-	clear_tally_counters(ioaddr);
-	spin_unlock_irqrestore(&rp->lock, flags);
+	spin_lock_bh(&rp->lock);
+	rhine_update_rx_crc_and_missed_errord(rp);
+	spin_unlock_bh(&rp->lock);
 
 	return &dev->stats;
 }
@@ -2022,9 +2095,9 @@ static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
 	struct rhine_private *rp = netdev_priv(dev);
 	int rc;
 
-	spin_lock_irq(&rp->lock);
+	mutex_lock(&rp->task_lock);
 	rc = mii_ethtool_gset(&rp->mii_if, cmd);
-	spin_unlock_irq(&rp->lock);
+	mutex_unlock(&rp->task_lock);
 
 	return rc;
 }
@@ -2034,10 +2107,10 @@ static int netdev_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
 	struct rhine_private *rp = netdev_priv(dev);
 	int rc;
 
-	spin_lock_irq(&rp->lock);
+	mutex_lock(&rp->task_lock);
 	rc = mii_ethtool_sset(&rp->mii_if, cmd);
-	spin_unlock_irq(&rp->lock);
 	rhine_set_carrier(&rp->mii_if);
+	mutex_unlock(&rp->task_lock);
 
 	return rc;
 }
@@ -2058,12 +2131,16 @@ static u32 netdev_get_link(struct net_device *dev)
 
 static u32 netdev_get_msglevel(struct net_device *dev)
 {
-	return debug;
+	struct rhine_private *rp = netdev_priv(dev);
+
+	return rp->msg_enable;
 }
 
 static void netdev_set_msglevel(struct net_device *dev, u32 value)
 {
-	debug = value;
+	struct rhine_private *rp = netdev_priv(dev);
+
+	rp->msg_enable = value;
 }
 
 static void rhine_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
@@ -2119,10 +2196,10 @@ static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
 	if (!netif_running(dev))
 		return -EINVAL;
 
-	spin_lock_irq(&rp->lock);
+	mutex_lock(&rp->task_lock);
 	rc = generic_mii_ioctl(&rp->mii_if, if_mii(rq), cmd, NULL);
-	spin_unlock_irq(&rp->lock);
 	rhine_set_carrier(&rp->mii_if);
+	mutex_unlock(&rp->task_lock);
 
 	return rc;
 }
@@ -2132,27 +2209,21 @@ static int rhine_close(struct net_device *dev)
 	struct rhine_private *rp = netdev_priv(dev);
 	void __iomem *ioaddr = rp->base;
 
+	rhine_task_disable(rp);
 	napi_disable(&rp->napi);
-	cancel_work_sync(&rp->reset_task);
 	netif_stop_queue(dev);
 
-	spin_lock_irq(&rp->lock);
-
-	if (debug > 1)
-		netdev_dbg(dev, "Shutting down ethercard, status was %04x\n",
-			   ioread16(ioaddr + ChipCmd));
+	netif_dbg(rp, ifdown, dev, "Shutting down ethercard, status was %04x\n",
+		  ioread16(ioaddr + ChipCmd));
 
 	/* Switch to loopback mode to avoid hardware races. */
 	iowrite8(rp->tx_thresh | 0x02, ioaddr + TxConfig);
 
-	/* Disable interrupts by clearing the interrupt mask. */
-	iowrite16(0x0000, ioaddr + IntrEnable);
+	rhine_irq_disable(rp);
 
 	/* Stop the chip's Tx and Rx processes. */
 	iowrite16(CmdStop, ioaddr + ChipCmd);
 
-	spin_unlock_irq(&rp->lock);
-
 	free_irq(rp->pdev->irq, dev);
 	free_rbufs(dev);
 	free_tbufs(dev);
@@ -2192,6 +2263,8 @@ static void rhine_shutdown (struct pci_dev *pdev)
 	if (rp->quirks & rq6patterns)
 		iowrite8(0x04, ioaddr + WOLcgClr);
 
+	spin_lock(&rp->lock);
+
 	if (rp->wolopts & WAKE_MAGIC) {
 		iowrite8(WOLmagic, ioaddr + WOLcrSet);
 		/*
@@ -2216,58 +2289,46 @@ static void rhine_shutdown (struct pci_dev *pdev)
 		iowrite8(ioread8(ioaddr + StickyHW) | 0x04, ioaddr + StickyHW);
 	}
 
-	/* Hit power state D3 (sleep) */
-	if (!avoid_D3)
-		iowrite8(ioread8(ioaddr + StickyHW) | 0x03, ioaddr + StickyHW);
+	spin_unlock(&rp->lock);
 
-	/* TODO: Check use of pci_enable_wake() */
+	if (system_state == SYSTEM_POWER_OFF && !avoid_D3) {
+		iowrite8(ioread8(ioaddr + StickyHW) | 0x03, ioaddr + StickyHW);
 
+		pci_wake_from_d3(pdev, true);
+		pci_set_power_state(pdev, PCI_D3hot);
+	}
 }
 
-#ifdef CONFIG_PM
-static int rhine_suspend(struct pci_dev *pdev, pm_message_t state)
+#ifdef CONFIG_PM_SLEEP
+static int rhine_suspend(struct device *device)
 {
+	struct pci_dev *pdev = to_pci_dev(device);
 	struct net_device *dev = pci_get_drvdata(pdev);
 	struct rhine_private *rp = netdev_priv(dev);
-	unsigned long flags;
 
 	if (!netif_running(dev))
 		return 0;
 
+	rhine_task_disable(rp);
+	rhine_irq_disable(rp);
 	napi_disable(&rp->napi);
 
 	netif_device_detach(dev);
-	pci_save_state(pdev);
 
-	spin_lock_irqsave(&rp->lock, flags);
 	rhine_shutdown(pdev);
-	spin_unlock_irqrestore(&rp->lock, flags);
 
-	free_irq(dev->irq, dev);
 	return 0;
 }
 
-static int rhine_resume(struct pci_dev *pdev)
+static int rhine_resume(struct device *device)
 {
+	struct pci_dev *pdev = to_pci_dev(device);
 	struct net_device *dev = pci_get_drvdata(pdev);
 	struct rhine_private *rp = netdev_priv(dev);
-	unsigned long flags;
-	int ret;
 
 	if (!netif_running(dev))
 		return 0;
 
-	if (request_irq(dev->irq, rhine_interrupt, IRQF_SHARED, dev->name, dev))
-		netdev_err(dev, "request_irq failed\n");
-
-	ret = pci_set_power_state(pdev, PCI_D0);
-	if (debug > 1)
-		netdev_info(dev, "Entering power state D0 %s (%d)\n",
-			    ret ? "failed" : "succeeded", ret);
-
-	pci_restore_state(pdev);
-
-	spin_lock_irqsave(&rp->lock, flags);
 #ifdef USE_MMIO
 	enable_mmio(rp->pioaddr, rp->quirks);
 #endif
@@ -2276,25 +2337,32 @@ static int rhine_resume(struct pci_dev *pdev)
 	free_rbufs(dev);
 	alloc_tbufs(dev);
 	alloc_rbufs(dev);
+	rhine_task_enable(rp);
+	spin_lock_bh(&rp->lock);
 	init_registers(dev);
-	spin_unlock_irqrestore(&rp->lock, flags);
+	spin_unlock_bh(&rp->lock);
 
 	netif_device_attach(dev);
 
 	return 0;
 }
-#endif /* CONFIG_PM */
+
+static SIMPLE_DEV_PM_OPS(rhine_pm_ops, rhine_suspend, rhine_resume);
+#define RHINE_PM_OPS	(&rhine_pm_ops)
+
+#else
+
+#define RHINE_PM_OPS	NULL
+
+#endif /* !CONFIG_PM_SLEEP */
 
 static struct pci_driver rhine_driver = {
 	.name		= DRV_NAME,
 	.id_table	= rhine_pci_tbl,
 	.probe		= rhine_init_one,
 	.remove		= __devexit_p(rhine_remove_one),
-#ifdef CONFIG_PM
-	.suspend	= rhine_suspend,
-	.resume		= rhine_resume,
-#endif /* CONFIG_PM */
 	.shutdown	= rhine_shutdown,
+	.driver.pm	= RHINE_PM_OPS,
 };
 
 static struct dmi_system_id __initdata rhine_dmi_table[] = {
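The legacy pci_driver suspend/resume hooks give way to dev_pm_ops, so PCI state save/restore and the D0 transition are left to the PCI core instead of the driver. Roughly what the SIMPLE_DEV_PM_OPS() line above boils down to, per <linux/pm.h> of that era (stub types are mine so the sketch compiles standalone; the real struct has many more callbacks): one suspend/resume pair is reused for all six system-sleep transitions.

struct device;

struct dev_pm_ops {
	int (*suspend)(struct device *dev);
	int (*resume)(struct device *dev);
	int (*freeze)(struct device *dev);
	int (*thaw)(struct device *dev);
	int (*poweroff)(struct device *dev);
	int (*restore)(struct device *dev);
};

static int rhine_suspend(struct device *device) { (void)device; return 0; }
static int rhine_resume(struct device *device) { (void)device; return 0; }

/* Approximate expansion of SIMPLE_DEV_PM_OPS(rhine_pm_ops, ...) */
static const struct dev_pm_ops rhine_pm_ops = {
	.suspend	= rhine_suspend,
	.resume		= rhine_resume,
	.freeze		= rhine_suspend,
	.thaw		= rhine_resume,
	.poweroff	= rhine_suspend,
	.restore	= rhine_resume,
};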