path: root/drivers/net/r8169.c
Diffstat (limited to 'drivers/net/r8169.c')
-rw-r--r--  drivers/net/r8169.c  102
1 files changed, 57 insertions, 45 deletions
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
index 0b6e8c896835..8247a945a1d9 100644
--- a/drivers/net/r8169.c
+++ b/drivers/net/r8169.c
@@ -3554,54 +3554,64 @@ static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance)
 	int handled = 0;
 	int status;
 
+	/* loop handling interrupts until we have no new ones or
+	 * we hit a invalid/hotplug case.
+	 */
 	status = RTL_R16(IntrStatus);
+	while (status && status != 0xffff) {
+		handled = 1;
 
-	/* hotplug/major error/no more work/shared irq */
-	if ((status == 0xffff) || !status)
-		goto out;
-
-	handled = 1;
+		/* Handle all of the error cases first. These will reset
+		 * the chip, so just exit the loop.
+		 */
+		if (unlikely(!netif_running(dev))) {
+			rtl8169_asic_down(ioaddr);
+			break;
+		}
 
-	if (unlikely(!netif_running(dev))) {
-		rtl8169_asic_down(ioaddr);
-		goto out;
-	}
+		/* Work around for rx fifo overflow */
+		if (unlikely(status & RxFIFOOver) &&
+		    (tp->mac_version == RTL_GIGA_MAC_VER_11)) {
+			netif_stop_queue(dev);
+			rtl8169_tx_timeout(dev);
+			break;
+		}
 
-	status &= tp->intr_mask;
-	RTL_W16(IntrStatus,
-		(status & RxFIFOOver) ? (status | RxOverflow) : status);
+		if (unlikely(status & SYSErr)) {
+			rtl8169_pcierr_interrupt(dev);
+			break;
+		}
 
-	if (!(status & tp->intr_event))
-		goto out;
+		if (status & LinkChg)
+			rtl8169_check_link_status(dev, tp, ioaddr);
 
-	/* Work around for rx fifo overflow */
-	if (unlikely(status & RxFIFOOver) &&
-	    (tp->mac_version == RTL_GIGA_MAC_VER_11)) {
-		netif_stop_queue(dev);
-		rtl8169_tx_timeout(dev);
-		goto out;
-	}
+		/* We need to see the lastest version of tp->intr_mask to
+		 * avoid ignoring an MSI interrupt and having to wait for
+		 * another event which may never come.
+		 */
+		smp_rmb();
+		if (status & tp->intr_mask & tp->napi_event) {
+			RTL_W16(IntrMask, tp->intr_event & ~tp->napi_event);
+			tp->intr_mask = ~tp->napi_event;
+
+			if (likely(napi_schedule_prep(&tp->napi)))
+				__napi_schedule(&tp->napi);
+			else if (netif_msg_intr(tp)) {
+				printk(KERN_INFO "%s: interrupt %04x in poll\n",
+				       dev->name, status);
+			}
+		}
 
-	if (unlikely(status & SYSErr)) {
-		rtl8169_pcierr_interrupt(dev);
-		goto out;
+		/* We only get a new MSI interrupt when all active irq
+		 * sources on the chip have been acknowledged. So, ack
+		 * everything we've seen and check if new sources have become
+		 * active to avoid blocking all interrupts from the chip.
+		 */
+		RTL_W16(IntrStatus,
+			(status & RxFIFOOver) ? (status | RxOverflow) : status);
+		status = RTL_R16(IntrStatus);
 	}
 
-	if (status & LinkChg)
-		rtl8169_check_link_status(dev, tp, ioaddr);
-
-	if (status & tp->napi_event) {
-		RTL_W16(IntrMask, tp->intr_event & ~tp->napi_event);
-		tp->intr_mask = ~tp->napi_event;
-
-		if (likely(napi_schedule_prep(&tp->napi)))
-			__napi_schedule(&tp->napi);
-		else if (netif_msg_intr(tp)) {
-			printk(KERN_INFO "%s: interrupt %04x in poll\n",
-			       dev->name, status);
-		}
-	}
-out:
 	return IRQ_RETVAL(handled);
 }
 
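The hunk above turns the single-pass handler into a loop: with MSI the chip only signals a new interrupt once every source it has latched has been acknowledged, so the handler acks whatever it saw, re-reads IntrStatus, and keeps going until the status is 0 (nothing pending) or 0xffff (device gone/hotplug). What follows is only a rough userspace mock of that ack-and-re-read control flow, not the driver code; the register model, event bits and helper names are invented for illustration.

/* Mock of the ack-and-re-read pattern used by the new handler.
 * Everything here (EV_* bits, read_status/ack_status, the "late" event)
 * is hypothetical -- only the control flow mirrors the driver change.
 */
#include <stdint.h>
#include <stdio.h>

#define EV_RX      0x01
#define EV_TX      0x02
#define EV_LINKCHG 0x04

static uint16_t intr_status;		/* stands in for the IntrStatus register */

static uint16_t read_status(void) { return intr_status; }
static void ack_status(uint16_t bits) { intr_status &= ~bits; }

/* Simulate a source firing while the handler is still running.  A new MSI
 * edge is only generated once everything pending has been acked, so a
 * single-pass handler could leave this event stranded until the next one.
 */
static void maybe_late_event(void)
{
	static int fired;

	if (!fired++)
		intr_status |= EV_LINKCHG;
}

static int mock_isr(void)
{
	int handled = 0;
	uint16_t status = read_status();

	/* Loop until nothing is pending (0) or the device looks gone (0xffff). */
	while (status && status != 0xffff) {
		handled = 1;

		if (status & EV_RX)
			printf("rx work\n");
		if (status & EV_TX)
			printf("tx work\n");
		if (status & EV_LINKCHG)
			printf("link change\n");

		maybe_late_event();

		/* Ack only what we saw, then re-read: anything that became
		 * active in the meantime is handled on the next pass instead
		 * of being silently stranded.
		 */
		ack_status(status);
		status = read_status();
	}

	return handled;
}

int main(void)
{
	intr_status = EV_RX | EV_TX;
	printf("handled=%d\n", mock_isr());
	return 0;
}

Run standalone, this handles the rx/tx work on the first pass and picks up the link-change event that arrived mid-handler on the second pass, the kind of case the loop is there to cover.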
@@ -3617,13 +3627,15 @@ static int rtl8169_poll(struct napi_struct *napi, int budget)
 
 	if (work_done < budget) {
 		napi_complete(napi);
-		tp->intr_mask = 0xffff;
-		/*
-		 * 20040426: the barrier is not strictly required but the
-		 * behavior of the irq handler could be less predictable
-		 * without it. Btw, the lack of flush for the posted pci
-		 * write is safe - FR
+
+		/* We need for force the visibility of tp->intr_mask
+		 * for other CPUs, as we can loose an MSI interrupt
+		 * and potentially wait for a retransmit timeout if we don't.
+		 * The posted write to IntrMask is safe, as it will
+		 * eventually make it to the chip and we won't loose anything
+		 * until it does.
 		 */
+		tp->intr_mask = 0xffff;
 		smp_wmb();
 		RTL_W16(IntrMask, tp->intr_event);
 	}
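This poll-side hunk pairs with the smp_rmb() added in the interrupt handler above: rtl8169_poll() now stores tp->intr_mask = 0xffff before the smp_wmb() and the IntrMask write, so once the chip can raise an interrupt again the handler is guaranteed to observe the unmasked value rather than a stale ~napi_event. The snippet below is only a userspace analogue of that smp_wmb()/smp_rmb() pairing, written with C11 fences and pthreads; the thread and variable names are invented, and the atomic flag merely plays the role of the IntrMask register write.

/* poll_side() mimics rtl8169_poll(): store intr_mask, write barrier, then
 * "re-enable interrupts".  irq_side() mimics the handler: once it sees
 * interrupts enabled it issues a read barrier and is then guaranteed to
 * see intr_mask == 0xffff.  Hypothetical names throughout.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static unsigned int intr_mask;		/* plays the role of tp->intr_mask      */
static atomic_uint irq_enabled;		/* plays the role of the IntrMask write */

static void *poll_side(void *arg)
{
	(void)arg;
	intr_mask = 0xffff;				/* tp->intr_mask = 0xffff; */
	atomic_thread_fence(memory_order_release);	/* smp_wmb();              */
	atomic_store_explicit(&irq_enabled, 1,		/* RTL_W16(IntrMask, ...); */
			      memory_order_relaxed);
	return NULL;
}

static void *irq_side(void *arg)
{
	(void)arg;
	/* Spin until the "chip" is allowed to interrupt again. */
	while (!atomic_load_explicit(&irq_enabled, memory_order_relaxed))
		;
	atomic_thread_fence(memory_order_acquire);	/* smp_rmb();              */
	/* The release/acquire pairing orders the intr_mask store before this
	 * read, so the napi_event bit cannot be masked off by a stale value.
	 */
	printf("irq side sees intr_mask = %#x\n", intr_mask);
	return NULL;
}

int main(void)
{
	pthread_t poller, irq;

	pthread_create(&irq, NULL, irq_side, NULL);
	pthread_create(&poller, NULL, poll_side, NULL);
	pthread_join(poller, NULL);
	pthread_join(irq, NULL);
	return 0;
}

Compile with -pthread. In this analogue, dropping the paired fences (the stand-ins for the kernel barriers) would allow the irq side to observe interrupts re-enabled while still reading the old intr_mask value, which is the lost-MSI window the comments in the diff describe.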