author	Francois Romieu <romieu@fr.zoreil.com>	2012-01-06 15:42:26 -0500
committer	Francois Romieu <romieu@fr.zoreil.com>	2012-01-07 17:02:37 -0500
commit	7ab87ff4c770eed71e3777936299292739fcd0fe (patch)
tree	d6eac281d61f8cdf84a387b2ef455d20f058001d /drivers
parent	a5abec1e84c3d9cd197667e5fa94f25c21a2cb8e (diff)
via-rhine: move work from irq handler to softirq and beyond.
- Tx processing is moved from the irq handler to NAPI poll
- link events and obscure event processing are moved to their proper work queue

Locking rules undergo some changes through the driver:
- the driver offers the usual lock-free Tx path
- besides the IRQ handler, the link event task schedules the napi handler.
  The driver thus adds some internal locking to prevent a loop when both
  must be disabled.
- the reset task keeps being scheduled from the Tx watchdog handler, thus
  with implicit Tx queue disabling. It does not need to care about irq,
  only about the napi softirq and the competing task.
- it is not worth adding a dedicated lock between {g, s}et_wol and
  rhine_shutdown. It should not hurt to narrow it down a bit, though.
- rhine_reset_task must keep its huge spin_lock_bh protected section due to:
  - races for the CAM registers (see rhine_vlan_rx_{add, kill}_vid)
  - implicit use of napi_enable (see init_registers)
  - use of the same lock for stats read / update exclusion between napi rx
    processing and rhine_get_stats
- rhine_resume requires a softirq disabled section for the same reason as
  rhine_reset_task
- {free, request}_irq have been replaced with IntrEnable actions in
  rhine_{suspend, resume}. For the latter this is hidden behind init_registers.

Signed-off-by: Francois Romieu <romieu@fr.zoreil.com>
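For reference, a condensed sketch of the task gating the notes above rely on, abridged from the hunks further down (rhine_task_{disable,enable} and the task_enable check in the work handlers). It is illustrative only, not a substitute for the patch itself:

static void rhine_task_disable(struct rhine_private *rp)
{
	mutex_lock(&rp->task_lock);
	rp->task_enable = false;	/* later task invocations bail out early */
	mutex_unlock(&rp->task_lock);

	cancel_work_sync(&rp->slow_event_task);	/* flush anything already queued */
	cancel_work_sync(&rp->reset_task);
}

static void rhine_slow_event_task(struct work_struct *work)
{
	struct rhine_private *rp =
		container_of(work, struct rhine_private, slow_event_task);

	mutex_lock(&rp->task_lock);
	if (!rp->task_enable)		/* interface is going down, do nothing */
		goto out_unlock;

	/* ... ack slow events, check link, reschedule napi ... */

out_unlock:
	mutex_unlock(&rp->task_lock);
}

This is why rhine_close() and rhine_suspend() can call rhine_task_disable() before napi_disable() without either task re-arming the other behind their back.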
Diffstat (limited to 'drivers')
-rw-r--r--	drivers/net/ethernet/via/via-rhine.c	402
1 file changed, 225 insertions(+), 177 deletions(-)
diff --git a/drivers/net/ethernet/via/via-rhine.c b/drivers/net/ethernet/via/via-rhine.c
index d17ca32c8280..9a7bacc5c1d7 100644
--- a/drivers/net/ethernet/via/via-rhine.c
+++ b/drivers/net/ethernet/via/via-rhine.c
@@ -42,7 +42,6 @@
 
 #define DEBUG
 static int debug = 1;	/* 1 normal messages, 0 quiet .. 7 verbose. */
-static int max_interrupt_work = 20;
 
 /* Set the copy breakpoint for the copy-only-tiny-frames scheme.
    Setting to > 1518 effectively disables this feature. */
@@ -128,11 +127,9 @@ MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
 MODULE_DESCRIPTION("VIA Rhine PCI Fast Ethernet driver");
 MODULE_LICENSE("GPL");
 
-module_param(max_interrupt_work, int, 0);
 module_param(debug, int, 0);
 module_param(rx_copybreak, int, 0);
 module_param(avoid_D3, bool, 0);
-MODULE_PARM_DESC(max_interrupt_work, "VIA Rhine maximum events handled per interrupt");
 MODULE_PARM_DESC(debug, "VIA Rhine debug level (0-7)");
 MODULE_PARM_DESC(rx_copybreak, "VIA Rhine copy breakpoint for copy-only-tiny-frames");
 MODULE_PARM_DESC(avoid_D3, "Avoid power state D3 (work-around for broken BIOSes)");
@@ -351,16 +348,25 @@ static const int mmio_verify_registers[] = {
 
 /* Bits in the interrupt status/mask registers. */
 enum intr_status_bits {
-	IntrRxDone=0x0001, IntrRxErr=0x0004, IntrRxEmpty=0x0020,
-	IntrTxDone=0x0002, IntrTxError=0x0008, IntrTxUnderrun=0x0210,
-	IntrPCIErr=0x0040,
-	IntrStatsMax=0x0080, IntrRxEarly=0x0100,
-	IntrRxOverflow=0x0400, IntrRxDropped=0x0800, IntrRxNoBuf=0x1000,
-	IntrTxAborted=0x2000, IntrLinkChange=0x4000,
-	IntrRxWakeUp=0x8000,
-	IntrNormalSummary=0x0003, IntrAbnormalSummary=0xC260,
-	IntrTxDescRace=0x080000, /* mapped from IntrStatus2 */
-	IntrTxErrSummary=0x082218,
+	IntrRxDone = 0x0001,
+	IntrTxDone = 0x0002,
+	IntrRxErr = 0x0004,
+	IntrTxError = 0x0008,
+	IntrRxEmpty = 0x0020,
+	IntrPCIErr = 0x0040,
+	IntrStatsMax = 0x0080,
+	IntrRxEarly = 0x0100,
+	IntrTxUnderrun = 0x0210,
+	IntrRxOverflow = 0x0400,
+	IntrRxDropped = 0x0800,
+	IntrRxNoBuf = 0x1000,
+	IntrTxAborted = 0x2000,
+	IntrLinkChange = 0x4000,
+	IntrRxWakeUp = 0x8000,
+	IntrTxDescRace = 0x080000,	/* mapped from IntrStatus2 */
+	IntrNormalSummary = IntrRxDone | IntrTxDone,
+	IntrTxErrSummary = IntrTxDescRace | IntrTxAborted | IntrTxError |
+			   IntrTxUnderrun,
 };
 
 /* Bits in WOLcrSet/WOLcrClr and PwrcsrSet/PwrcsrClr */
@@ -439,6 +445,9 @@ struct rhine_private {
 	struct net_device *dev;
 	struct napi_struct napi;
 	spinlock_t lock;
+	struct mutex task_lock;
+	bool task_enable;
+	struct work_struct slow_event_task;
 	struct work_struct reset_task;
 
 	/* Frequently used values: keep some adjacent for cache effect. */
@@ -476,13 +485,13 @@ static int mdio_read(struct net_device *dev, int phy_id, int location);
 static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
 static int rhine_open(struct net_device *dev);
 static void rhine_reset_task(struct work_struct *work);
+static void rhine_slow_event_task(struct work_struct *work);
 static void rhine_tx_timeout(struct net_device *dev);
 static netdev_tx_t rhine_start_tx(struct sk_buff *skb,
 				  struct net_device *dev);
 static irqreturn_t rhine_interrupt(int irq, void *dev_instance);
 static void rhine_tx(struct net_device *dev);
 static int rhine_rx(struct net_device *dev, int limit);
-static void rhine_error(struct net_device *dev, int intr_status);
 static void rhine_set_rx_mode(struct net_device *dev);
 static struct net_device_stats *rhine_get_stats(struct net_device *dev);
 static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
@@ -490,6 +499,7 @@ static const struct ethtool_ops netdev_ethtool_ops;
 static int rhine_close(struct net_device *dev);
 static int rhine_vlan_rx_add_vid(struct net_device *dev, unsigned short vid);
 static int rhine_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid);
+static void rhine_restart_tx(struct net_device *dev);
 
 #define RHINE_WAIT_FOR(condition)	\
 do {					\
@@ -520,7 +530,7 @@ static void rhine_ack_events(struct rhine_private *rp, u32 mask)
 	if (rp->quirks & rqStatusWBRace)
 		iowrite8(mask >> 16, ioaddr + IntrStatus2);
 	iowrite16(mask, ioaddr + IntrStatus);
-	IOSYNC;
+	mmiowb();
 }
 
 /*
@@ -669,23 +679,125 @@ static void rhine_kick_tx_threshold(struct rhine_private *rp)
 	}
 }
 
+static void rhine_tx_err(struct rhine_private *rp, u32 status)
+{
+	struct net_device *dev = rp->dev;
+
+	if (status & IntrTxAborted) {
+		if (debug > 1)
+			netdev_info(dev, "Abort %08x, frame dropped\n", status);
+	}
+
+	if (status & IntrTxUnderrun) {
+		rhine_kick_tx_threshold(rp);
+		if (debug > 1)
+			netdev_info(dev, "Transmitter underrun, Tx threshold now %02x\n",
+				    rp->tx_thresh);
+	}
+
+	if (status & IntrTxDescRace) {
+		if (debug > 2)
+			netdev_info(dev, "Tx descriptor write-back race\n");
+	}
+
+	if ((status & IntrTxError) &&
+	    (status & (IntrTxAborted | IntrTxUnderrun | IntrTxDescRace)) == 0) {
+		rhine_kick_tx_threshold(rp);
+		if (debug > 1)
+			netdev_info(dev, "Unspecified error. Tx threshold now %02x\n",
+				    rp->tx_thresh);
+	}
+
+	rhine_restart_tx(dev);
+}
+
+static void rhine_update_rx_crc_and_missed_errord(struct rhine_private *rp)
+{
+	void __iomem *ioaddr = rp->base;
+	struct net_device_stats *stats = &rp->dev->stats;
+
+	stats->rx_crc_errors += ioread16(ioaddr + RxCRCErrs);
+	stats->rx_missed_errors += ioread16(ioaddr + RxMissed);
+
+	/*
+	 * Clears the "tally counters" for CRC errors and missed frames(?).
+	 * It has been reported that some chips need a write of 0 to clear
+	 * these, for others the counters are set to 1 when written to and
+	 * instead cleared when read. So we clear them both ways ...
+	 */
+	iowrite32(0, ioaddr + RxMissed);
+	ioread16(ioaddr + RxCRCErrs);
+	ioread16(ioaddr + RxMissed);
+}
+
+#define RHINE_EVENT_NAPI_RX	(IntrRxDone | \
+				 IntrRxErr | \
+				 IntrRxEmpty | \
+				 IntrRxOverflow | \
+				 IntrRxDropped | \
+				 IntrRxNoBuf | \
+				 IntrRxWakeUp)
+
+#define RHINE_EVENT_NAPI_TX_ERR	(IntrTxError | \
+				 IntrTxAborted | \
+				 IntrTxUnderrun | \
+				 IntrTxDescRace)
+#define RHINE_EVENT_NAPI_TX	(IntrTxDone | RHINE_EVENT_NAPI_TX_ERR)
+
+#define RHINE_EVENT_NAPI	(RHINE_EVENT_NAPI_RX | \
+				 RHINE_EVENT_NAPI_TX | \
+				 IntrStatsMax)
+#define RHINE_EVENT_SLOW	(IntrPCIErr | IntrLinkChange)
+#define RHINE_EVENT		(RHINE_EVENT_NAPI | RHINE_EVENT_SLOW)
+
 static int rhine_napipoll(struct napi_struct *napi, int budget)
 {
 	struct rhine_private *rp = container_of(napi, struct rhine_private, napi);
 	struct net_device *dev = rp->dev;
 	void __iomem *ioaddr = rp->base;
-	int work_done;
+	u16 enable_mask = RHINE_EVENT & 0xffff;
+	int work_done = 0;
+	u32 status;
+
+	status = rhine_get_events(rp);
+	rhine_ack_events(rp, status & ~RHINE_EVENT_SLOW);
+
+	if (status & RHINE_EVENT_NAPI_RX)
+		work_done += rhine_rx(dev, budget);
+
+	if (status & RHINE_EVENT_NAPI_TX) {
+		if (status & RHINE_EVENT_NAPI_TX_ERR) {
+			u8 cmd;
+
+			/* Avoid scavenging before Tx engine turned off */
+			RHINE_WAIT_FOR(!(ioread8(ioaddr + ChipCmd) & CmdTxOn));
+			cmd = ioread8(ioaddr + ChipCmd);
+			if ((cmd & CmdTxOn) && (debug > 2)) {
+				netdev_warn(dev, "%s: Tx engine still on\n",
+					    __func__);
+			}
+		}
+		rhine_tx(dev);
+
+		if (status & RHINE_EVENT_NAPI_TX_ERR)
+			rhine_tx_err(rp, status);
+	}
+
+	if (status & IntrStatsMax) {
+		spin_lock(&rp->lock);
+		rhine_update_rx_crc_and_missed_errord(rp);
+		spin_unlock(&rp->lock);
+	}
 
-	work_done = rhine_rx(dev, budget);
+	if (status & RHINE_EVENT_SLOW) {
+		enable_mask &= ~RHINE_EVENT_SLOW;
+		schedule_work(&rp->slow_event_task);
+	}
 
 	if (work_done < budget) {
 		napi_complete(napi);
-
-		iowrite16(IntrRxDone | IntrRxErr | IntrRxEmpty| IntrRxOverflow |
-			  IntrRxDropped | IntrRxNoBuf | IntrTxAborted |
-			  IntrTxDone | IntrTxError | IntrTxUnderrun |
-			  IntrPCIErr | IntrStatsMax | IntrLinkChange,
-			  ioaddr + IntrEnable);
+		iowrite16(enable_mask, ioaddr + IntrEnable);
+		mmiowb();
 	}
 	return work_done;
 }
@@ -868,7 +980,9 @@ static int __devinit rhine_init_one(struct pci_dev *pdev,
 	dev->irq = pdev->irq;
 
 	spin_lock_init(&rp->lock);
+	mutex_init(&rp->task_lock);
 	INIT_WORK(&rp->reset_task, rhine_reset_task);
+	INIT_WORK(&rp->slow_event_task, rhine_slow_event_task);
 
 	rp->mii_if.dev = dev;
 	rp->mii_if.mdio_read = mdio_read;
@@ -1278,10 +1392,10 @@ static int rhine_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
 {
 	struct rhine_private *rp = netdev_priv(dev);
 
-	spin_lock_irq(&rp->lock);
+	spin_lock_bh(&rp->lock);
 	set_bit(vid, rp->active_vlans);
 	rhine_update_vcam(dev);
-	spin_unlock_irq(&rp->lock);
+	spin_unlock_bh(&rp->lock);
 	return 0;
 }
 
@@ -1289,10 +1403,10 @@ static int rhine_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
 {
 	struct rhine_private *rp = netdev_priv(dev);
 
-	spin_lock_irq(&rp->lock);
+	spin_lock_bh(&rp->lock);
 	clear_bit(vid, rp->active_vlans);
 	rhine_update_vcam(dev);
-	spin_unlock_irq(&rp->lock);
+	spin_unlock_bh(&rp->lock);
 	return 0;
 }
 
@@ -1322,12 +1436,7 @@ static void init_registers(struct net_device *dev)
 
 	napi_enable(&rp->napi);
 
-	/* Enable interrupts by setting the interrupt mask. */
-	iowrite16(IntrRxDone | IntrRxErr | IntrRxEmpty| IntrRxOverflow |
-		  IntrRxDropped | IntrRxNoBuf | IntrTxAborted |
-		  IntrTxDone | IntrTxError | IntrTxUnderrun |
-		  IntrPCIErr | IntrStatsMax | IntrLinkChange,
-		  ioaddr + IntrEnable);
+	iowrite16(RHINE_EVENT & 0xffff, ioaddr + IntrEnable);
 
 	iowrite16(CmdStart | CmdTxOn | CmdRxOn | (Cmd1NoTxPoll << 8),
 		  ioaddr + ChipCmd);
@@ -1407,6 +1516,23 @@ static void mdio_write(struct net_device *dev, int phy_id, int regnum, int value
 	rhine_enable_linkmon(ioaddr);
 }
 
+static void rhine_task_disable(struct rhine_private *rp)
+{
+	mutex_lock(&rp->task_lock);
+	rp->task_enable = false;
+	mutex_unlock(&rp->task_lock);
+
+	cancel_work_sync(&rp->slow_event_task);
+	cancel_work_sync(&rp->reset_task);
+}
+
+static void rhine_task_enable(struct rhine_private *rp)
+{
+	mutex_lock(&rp->task_lock);
+	rp->task_enable = true;
+	mutex_unlock(&rp->task_lock);
+}
+
 static int rhine_open(struct net_device *dev)
 {
 	struct rhine_private *rp = netdev_priv(dev);
@@ -1429,6 +1555,7 @@ static int rhine_open(struct net_device *dev)
 	alloc_rbufs(dev);
 	alloc_tbufs(dev);
 	rhine_chip_reset(dev);
+	rhine_task_enable(rp);
 	init_registers(dev);
 	if (debug > 2)
 		netdev_dbg(dev, "%s() Done - status %04x MII status: %04x\n",
@@ -1446,11 +1573,12 @@ static void rhine_reset_task(struct work_struct *work)
 						reset_task);
 	struct net_device *dev = rp->dev;
 
-	/* protect against concurrent rx interrupts */
-	disable_irq(rp->pdev->irq);
+	mutex_lock(&rp->task_lock);
 
-	napi_disable(&rp->napi);
+	if (!rp->task_enable)
+		goto out_unlock;
 
+	napi_disable(&rp->napi);
 	spin_lock_bh(&rp->lock);
 
 	/* clear all descriptors */
@@ -1464,11 +1592,13 @@ static void rhine_reset_task(struct work_struct *work)
 	init_registers(dev);
 
 	spin_unlock_bh(&rp->lock);
-	enable_irq(rp->pdev->irq);
 
 	dev->trans_start = jiffies; /* prevent tx timeout */
 	dev->stats.tx_errors++;
 	netif_wake_queue(dev);
+
+out_unlock:
+	mutex_unlock(&rp->task_lock);
 }
 
 static void rhine_tx_timeout(struct net_device *dev)
@@ -1489,7 +1619,6 @@ static netdev_tx_t rhine_start_tx(struct sk_buff *skb,
 	struct rhine_private *rp = netdev_priv(dev);
 	void __iomem *ioaddr = rp->base;
 	unsigned entry;
-	unsigned long flags;
 
 	/* Caution: the write order is important here, set the field
 	   with the "ownership" bits last. */
@@ -1541,7 +1670,6 @@ static netdev_tx_t rhine_start_tx(struct sk_buff *skb,
 	rp->tx_ring[entry].tx_status = 0;
 
 	/* lock eth irq */
-	spin_lock_irqsave(&rp->lock, flags);
 	wmb();
 	rp->tx_ring[entry].tx_status |= cpu_to_le32(DescOwn);
 	wmb();
@@ -1562,8 +1690,6 @@ static netdev_tx_t rhine_start_tx(struct sk_buff *skb,
 	if (rp->cur_tx == rp->dirty_tx + TX_QUEUE_LEN)
 		netif_stop_queue(dev);
 
-	spin_unlock_irqrestore(&rp->lock, flags);
-
 	if (debug > 4) {
 		netdev_dbg(dev, "Transmit frame #%d queued in slot %d\n",
 			   rp->cur_tx-1, entry);
@@ -1571,66 +1697,39 @@ static netdev_tx_t rhine_start_tx(struct sk_buff *skb,
 	return NETDEV_TX_OK;
 }
 
+static void rhine_irq_disable(struct rhine_private *rp)
+{
+	iowrite16(0x0000, rp->base + IntrEnable);
+	mmiowb();
+}
+
 /* The interrupt handler does all of the Rx thread work and cleans up
    after the Tx thread. */
 static irqreturn_t rhine_interrupt(int irq, void *dev_instance)
 {
 	struct net_device *dev = dev_instance;
 	struct rhine_private *rp = netdev_priv(dev);
-	void __iomem *ioaddr = rp->base;
-	u32 intr_status;
-	int boguscnt = max_interrupt_work;
+	u32 status;
 	int handled = 0;
 
-	while ((intr_status = rhine_get_events(rp))) {
-		handled = 1;
-
-		/* Acknowledge all of the current interrupt sources ASAP. */
-		rhine_ack_events(rp, intr_status);
-
-		if (debug > 4)
-			netdev_dbg(dev, "Interrupt, status %08x\n",
-				   intr_status);
+	status = rhine_get_events(rp);
 
-		if (intr_status & (IntrRxDone | IntrRxErr | IntrRxDropped |
-				   IntrRxWakeUp | IntrRxEmpty | IntrRxNoBuf)) {
-			iowrite16(IntrTxAborted |
-				  IntrTxDone | IntrTxError | IntrTxUnderrun |
-				  IntrPCIErr | IntrStatsMax | IntrLinkChange,
-				  ioaddr + IntrEnable);
+	if (debug > 4)
+		netdev_dbg(dev, "Interrupt, status %08x\n", status);
 
-			napi_schedule(&rp->napi);
-		}
-
-		if (intr_status & (IntrTxErrSummary | IntrTxDone)) {
-			if (intr_status & IntrTxErrSummary) {
-				/* Avoid scavenging before Tx engine turned off */
-				RHINE_WAIT_FOR(!(ioread8(ioaddr+ChipCmd) & CmdTxOn));
-				if (debug > 2 &&
-				    ioread8(ioaddr+ChipCmd) & CmdTxOn)
-					netdev_warn(dev,
-						    "%s: Tx engine still on\n",
-						    __func__);
-			}
-			rhine_tx(dev);
-		}
+	if (status & RHINE_EVENT) {
+		handled = 1;
 
-		/* Abnormal error summary/uncommon events handlers. */
-		if (intr_status & (IntrPCIErr | IntrLinkChange |
-				   IntrStatsMax | IntrTxError | IntrTxAborted |
-				   IntrTxUnderrun | IntrTxDescRace))
-			rhine_error(dev, intr_status);
+		rhine_irq_disable(rp);
+		napi_schedule(&rp->napi);
+	}
 
-		if (--boguscnt < 0) {
-			netdev_warn(dev, "Too much work at interrupt, status=%#08x\n",
-				    intr_status);
-			break;
-		}
+	if (status & ~(IntrLinkChange | IntrStatsMax | RHINE_EVENT_NAPI)) {
+		if (debug > 1)
+			netdev_err(dev, "Something Wicked happened! %08x\n",
+				   status);
 	}
 
-	if (debug > 3)
-		netdev_dbg(dev, "exiting interrupt, status=%08x\n",
-			   ioread16(ioaddr + IntrStatus));
 	return IRQ_RETVAL(handled);
 }
 
@@ -1641,8 +1740,6 @@ static void rhine_tx(struct net_device *dev)
 	struct rhine_private *rp = netdev_priv(dev);
 	int txstatus = 0, entry = rp->dirty_tx % TX_RING_SIZE;
 
-	spin_lock(&rp->lock);
-
 	/* find and cleanup dirty tx descriptors */
 	while (rp->dirty_tx != rp->cur_tx) {
 		txstatus = le32_to_cpu(rp->tx_ring[entry].tx_status);
@@ -1696,8 +1793,6 @@ static void rhine_tx(struct net_device *dev)
 	}
 	if ((rp->cur_tx - rp->dirty_tx) < TX_QUEUE_LEN - 4)
 		netif_wake_queue(dev);
-
-	spin_unlock(&rp->lock);
 }
 
 /**
@@ -1848,19 +1943,6 @@ static int rhine_rx(struct net_device *dev, int limit)
 	return count;
 }
 
-/*
- * Clears the "tally counters" for CRC errors and missed frames(?).
- * It has been reported that some chips need a write of 0 to clear
- * these, for others the counters are set to 1 when written to and
- * instead cleared when read. So we clear them both ways ...
- */
-static inline void clear_tally_counters(void __iomem *ioaddr)
-{
-	iowrite32(0, ioaddr + RxMissed);
-	ioread16(ioaddr + RxCRCErrs);
-	ioread16(ioaddr + RxMissed);
-}
-
 static void rhine_restart_tx(struct net_device *dev) {
 	struct rhine_private *rp = netdev_priv(dev);
 	void __iomem *ioaddr = rp->base;
@@ -1899,69 +1981,41 @@ static void rhine_restart_tx(struct net_device *dev) {
 
 }
 
-static void rhine_error(struct net_device *dev, int intr_status)
+static void rhine_slow_event_task(struct work_struct *work)
 {
-	struct rhine_private *rp = netdev_priv(dev);
-	void __iomem *ioaddr = rp->base;
+	struct rhine_private *rp =
+		container_of(work, struct rhine_private, slow_event_task);
+	struct net_device *dev = rp->dev;
+	u32 intr_status;
 
-	spin_lock(&rp->lock);
+	mutex_lock(&rp->task_lock);
+
+	if (!rp->task_enable)
+		goto out_unlock;
+
+	intr_status = rhine_get_events(rp);
+	rhine_ack_events(rp, intr_status & RHINE_EVENT_SLOW);
 
 	if (intr_status & IntrLinkChange)
 		rhine_check_media(dev, 0);
-	if (intr_status & IntrStatsMax) {
-		dev->stats.rx_crc_errors += ioread16(ioaddr + RxCRCErrs);
-		dev->stats.rx_missed_errors += ioread16(ioaddr + RxMissed);
-		clear_tally_counters(ioaddr);
-	}
-	if (intr_status & IntrTxAborted) {
-		if (debug > 1)
-			netdev_info(dev, "Abort %08x, frame dropped\n",
-				    intr_status);
-	}
-	if (intr_status & IntrTxUnderrun) {
-		rhine_kick_tx_threshold(rp);
-		if (debug > 1)
-			netdev_info(dev, "Transmitter underrun, Tx threshold now %02x\n",
-				    rp->tx_thresh);
-	}
-	if (intr_status & IntrTxDescRace) {
-		if (debug > 2)
-			netdev_info(dev, "Tx descriptor write-back race\n");
-	}
-	if ((intr_status & IntrTxError) &&
-	    (intr_status & (IntrTxAborted |
-			    IntrTxUnderrun | IntrTxDescRace)) == 0) {
-		rhine_kick_tx_threshold(rp);
-		if (debug > 1)
-			netdev_info(dev, "Unspecified error. Tx threshold now %02x\n",
-				    rp->tx_thresh);
-	}
-	if (intr_status & (IntrTxAborted | IntrTxUnderrun | IntrTxDescRace |
-			   IntrTxError))
-		rhine_restart_tx(dev);
 
-	if (intr_status & ~(IntrLinkChange | IntrStatsMax | IntrTxUnderrun |
-			    IntrTxError | IntrTxAborted | IntrNormalSummary |
-			    IntrTxDescRace)) {
-		if (debug > 1)
-			netdev_err(dev, "Something Wicked happened! %08x\n",
-				   intr_status);
-	}
+	napi_disable(&rp->napi);
+	rhine_irq_disable(rp);
+	/* Slow and safe. Consider __napi_schedule as a replacement ? */
+	napi_enable(&rp->napi);
+	napi_schedule(&rp->napi);
 
-	spin_unlock(&rp->lock);
+out_unlock:
+	mutex_unlock(&rp->task_lock);
 }
 
 static struct net_device_stats *rhine_get_stats(struct net_device *dev)
 {
 	struct rhine_private *rp = netdev_priv(dev);
-	void __iomem *ioaddr = rp->base;
-	unsigned long flags;
 
-	spin_lock_irqsave(&rp->lock, flags);
-	dev->stats.rx_crc_errors += ioread16(ioaddr + RxCRCErrs);
-	dev->stats.rx_missed_errors += ioread16(ioaddr + RxMissed);
-	clear_tally_counters(ioaddr);
-	spin_unlock_irqrestore(&rp->lock, flags);
+	spin_lock_bh(&rp->lock);
+	rhine_update_rx_crc_and_missed_errord(rp);
+	spin_unlock_bh(&rp->lock);
 
 	return &dev->stats;
 }
@@ -2028,9 +2082,9 @@ static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
 	struct rhine_private *rp = netdev_priv(dev);
 	int rc;
 
-	spin_lock_irq(&rp->lock);
+	mutex_lock(&rp->task_lock);
 	rc = mii_ethtool_gset(&rp->mii_if, cmd);
-	spin_unlock_irq(&rp->lock);
+	mutex_unlock(&rp->task_lock);
 
 	return rc;
 }
@@ -2040,10 +2094,10 @@ static int netdev_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
 	struct rhine_private *rp = netdev_priv(dev);
 	int rc;
 
-	spin_lock_irq(&rp->lock);
+	mutex_lock(&rp->task_lock);
 	rc = mii_ethtool_sset(&rp->mii_if, cmd);
-	spin_unlock_irq(&rp->lock);
 	rhine_set_carrier(&rp->mii_if);
+	mutex_unlock(&rp->task_lock);
 
 	return rc;
 }
@@ -2125,10 +2179,10 @@ static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
 	if (!netif_running(dev))
 		return -EINVAL;
 
-	spin_lock_irq(&rp->lock);
+	mutex_lock(&rp->task_lock);
 	rc = generic_mii_ioctl(&rp->mii_if, if_mii(rq), cmd, NULL);
-	spin_unlock_irq(&rp->lock);
 	rhine_set_carrier(&rp->mii_if);
+	mutex_unlock(&rp->task_lock);
 
 	return rc;
 }
@@ -2138,12 +2192,10 @@ static int rhine_close(struct net_device *dev)
 	struct rhine_private *rp = netdev_priv(dev);
 	void __iomem *ioaddr = rp->base;
 
+	rhine_task_disable(rp);
 	napi_disable(&rp->napi);
-	cancel_work_sync(&rp->reset_task);
 	netif_stop_queue(dev);
 
-	spin_lock_irq(&rp->lock);
-
 	if (debug > 1)
 		netdev_dbg(dev, "Shutting down ethercard, status was %04x\n",
 			   ioread16(ioaddr + ChipCmd));
@@ -2151,14 +2203,11 @@ static int rhine_close(struct net_device *dev)
 	/* Switch to loopback mode to avoid hardware races. */
 	iowrite8(rp->tx_thresh | 0x02, ioaddr + TxConfig);
 
-	/* Disable interrupts by clearing the interrupt mask. */
-	iowrite16(0x0000, ioaddr + IntrEnable);
+	rhine_irq_disable(rp);
 
 	/* Stop the chip's Tx and Rx processes. */
 	iowrite16(CmdStop, ioaddr + ChipCmd);
 
-	spin_unlock_irq(&rp->lock);
-
 	free_irq(rp->pdev->irq, dev);
 	free_rbufs(dev);
 	free_tbufs(dev);
@@ -2198,6 +2247,8 @@ static void rhine_shutdown (struct pci_dev *pdev)
 	if (rp->quirks & rq6patterns)
 		iowrite8(0x04, ioaddr + WOLcgClr);
 
+	spin_lock(&rp->lock);
+
 	if (rp->wolopts & WAKE_MAGIC) {
 		iowrite8(WOLmagic, ioaddr + WOLcrSet);
 		/*
@@ -2222,6 +2273,8 @@ static void rhine_shutdown (struct pci_dev *pdev)
 		iowrite8(ioread8(ioaddr + StickyHW) | 0x04, ioaddr + StickyHW);
 	}
 
+	spin_unlock(&rp->lock);
+
 	/* Hit power state D3 (sleep) */
 	if (!avoid_D3)
 		iowrite8(ioread8(ioaddr + StickyHW) | 0x03, ioaddr + StickyHW);
@@ -2235,21 +2288,19 @@ static int rhine_suspend(struct pci_dev *pdev, pm_message_t state)
 {
 	struct net_device *dev = pci_get_drvdata(pdev);
 	struct rhine_private *rp = netdev_priv(dev);
-	unsigned long flags;
 
 	if (!netif_running(dev))
 		return 0;
 
+	rhine_task_disable(rp);
+	rhine_irq_disable(rp);
 	napi_disable(&rp->napi);
 
 	netif_device_detach(dev);
 	pci_save_state(pdev);
 
-	spin_lock_irqsave(&rp->lock, flags);
 	rhine_shutdown(pdev);
-	spin_unlock_irqrestore(&rp->lock, flags);
 
-	free_irq(dev->irq, dev);
 	return 0;
 }
 
@@ -2257,15 +2308,11 @@ static int rhine_resume(struct pci_dev *pdev)
 {
 	struct net_device *dev = pci_get_drvdata(pdev);
 	struct rhine_private *rp = netdev_priv(dev);
-	unsigned long flags;
 	int ret;
 
 	if (!netif_running(dev))
 		return 0;
 
-	if (request_irq(dev->irq, rhine_interrupt, IRQF_SHARED, dev->name, dev))
-		netdev_err(dev, "request_irq failed\n");
-
 	ret = pci_set_power_state(pdev, PCI_D0);
 	if (debug > 1)
 		netdev_info(dev, "Entering power state D0 %s (%d)\n",
@@ -2273,7 +2320,6 @@ static int rhine_resume(struct pci_dev *pdev)
 
 	pci_restore_state(pdev);
 
-	spin_lock_irqsave(&rp->lock, flags);
 #ifdef USE_MMIO
 	enable_mmio(rp->pioaddr, rp->quirks);
 #endif
@@ -2282,8 +2328,10 @@ static int rhine_resume(struct pci_dev *pdev)
 	free_rbufs(dev);
 	alloc_tbufs(dev);
 	alloc_rbufs(dev);
+	rhine_task_enable(rp);
+	spin_lock_bh(&rp->lock);
 	init_registers(dev);
-	spin_unlock_irqrestore(&rp->lock, flags);
+	spin_unlock_bh(&rp->lock);
 
 	netif_device_attach(dev);
 