Diffstat (limited to 'drivers/net/r8169.c')
 drivers/net/r8169.c | 102 
 1 files changed, 57 insertions, 45 deletions
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
index 7fa88d2b4c99..e94316b7868b 100644
--- a/drivers/net/r8169.c
+++ b/drivers/net/r8169.c
@@ -3561,54 +3561,64 @@ static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance)
 	int handled = 0;
 	int status;
 
+	/* loop handling interrupts until we have no new ones or
+	 * we hit a invalid/hotplug case.
+	 */
 	status = RTL_R16(IntrStatus);
+	while (status && status != 0xffff) {
+		handled = 1;
 
-	/* hotplug/major error/no more work/shared irq */
-	if ((status == 0xffff) || !status)
-		goto out;
-
-	handled = 1;
+		/* Handle all of the error cases first.  These will reset
+		 * the chip, so just exit the loop.
+		 */
+		if (unlikely(!netif_running(dev))) {
+			rtl8169_asic_down(ioaddr);
+			break;
+		}
 
-	if (unlikely(!netif_running(dev))) {
-		rtl8169_asic_down(ioaddr);
-		goto out;
-	}
+		/* Work around for rx fifo overflow */
+		if (unlikely(status & RxFIFOOver) &&
+		    (tp->mac_version == RTL_GIGA_MAC_VER_11)) {
+			netif_stop_queue(dev);
+			rtl8169_tx_timeout(dev);
+			break;
+		}
 
-	status &= tp->intr_mask;
-	RTL_W16(IntrStatus,
-		(status & RxFIFOOver) ? (status | RxOverflow) : status);
+		if (unlikely(status & SYSErr)) {
+			rtl8169_pcierr_interrupt(dev);
+			break;
+		}
 
-	if (!(status & tp->intr_event))
-		goto out;
+		if (status & LinkChg)
+			rtl8169_check_link_status(dev, tp, ioaddr);
 
-	/* Work around for rx fifo overflow */
-	if (unlikely(status & RxFIFOOver) &&
-	    (tp->mac_version == RTL_GIGA_MAC_VER_11)) {
-		netif_stop_queue(dev);
-		rtl8169_tx_timeout(dev);
-		goto out;
-	}
+		/* We need to see the lastest version of tp->intr_mask to
+		 * avoid ignoring an MSI interrupt and having to wait for
+		 * another event which may never come.
+		 */
+		smp_rmb();
+		if (status & tp->intr_mask & tp->napi_event) {
+			RTL_W16(IntrMask, tp->intr_event & ~tp->napi_event);
+			tp->intr_mask = ~tp->napi_event;
+
+			if (likely(napi_schedule_prep(&tp->napi)))
+				__napi_schedule(&tp->napi);
+			else if (netif_msg_intr(tp)) {
+				printk(KERN_INFO "%s: interrupt %04x in poll\n",
+				       dev->name, status);
+			}
+		}
 
-	if (unlikely(status & SYSErr)) {
-		rtl8169_pcierr_interrupt(dev);
-		goto out;
+		/* We only get a new MSI interrupt when all active irq
+		 * sources on the chip have been acknowledged. So, ack
+		 * everything we've seen and check if new sources have become
+		 * active to avoid blocking all interrupts from the chip.
+		 */
+		RTL_W16(IntrStatus,
+			(status & RxFIFOOver) ? (status | RxOverflow) : status);
+		status = RTL_R16(IntrStatus);
 	}
 
-	if (status & LinkChg)
-		rtl8169_check_link_status(dev, tp, ioaddr);
-
-	if (status & tp->napi_event) {
-		RTL_W16(IntrMask, tp->intr_event & ~tp->napi_event);
-		tp->intr_mask = ~tp->napi_event;
-
-		if (likely(napi_schedule_prep(&tp->napi)))
-			__napi_schedule(&tp->napi);
-		else if (netif_msg_intr(tp)) {
-			printk(KERN_INFO "%s: interrupt %04x in poll\n",
-			       dev->name, status);
-		}
-	}
-out:
 	return IRQ_RETVAL(handled);
 }
 
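The reasoning in this hunk (ack everything the chip has latched, then re-read IntrStatus before leaving the handler) is what keeps an edge-triggered MSI line from going quiet forever. A minimal user-space sketch of that ack-and-recheck pattern follows; it only models the idea, so the chip is a plain bitmask and read_status()/ack_status() are hypothetical stand-ins rather than the driver's RTL_R16/RTL_W16 accessors.

/* Hypothetical model of the "ack everything seen, then re-check" loop
 * used by the reworked rtl8169_interrupt(). Not driver code: the chip
 * is simulated by a plain bitmask and the helpers below stand in for
 * the real RTL_R16/RTL_W16 register accessors.
 */
#include <stdio.h>
#include <stdint.h>

static uint16_t chip_status = 0x0041;	/* pretend two sources are pending */

static uint16_t read_status(void)
{
	return chip_status;
}

static void ack_status(uint16_t bits)
{
	chip_status &= ~bits;		/* acking clears the latched sources */
	if (bits & 0x0040)
		chip_status |= 0x0004;	/* a new source appears while we ack */
}

static int fake_interrupt(void)
{
	int handled = 0;
	uint16_t status = read_status();

	/* Loop until no source is pending (0) or the device looks gone
	 * (a surprise-removed PCI device reads back as all ones).
	 */
	while (status && status != 0xffff) {
		handled = 1;
		printf("servicing status %04x\n", status);

		/* Ack what we saw, then re-read: with MSI, a source that
		 * became active meanwhile would otherwise never raise a
		 * new edge and the device would stall until a timeout.
		 */
		ack_status(status);
		status = read_status();
	}
	return handled;
}

int main(void)
{
	printf("handled=%d, final status=%04x\n", fake_interrupt(), chip_status);
	return 0;
}

Returning handled only when at least one pass serviced work mirrors how the handler reports IRQ_HANDLED versus IRQ_NONE for shared or spurious interrupts via IRQ_RETVAL(handled).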
@@ -3624,13 +3634,15 @@ static int rtl8169_poll(struct napi_struct *napi, int budget)
 
 	if (work_done < budget) {
 		napi_complete(napi);
-		tp->intr_mask = 0xffff;
-		/*
-		 * 20040426: the barrier is not strictly required but the
-		 * behavior of the irq handler could be less predictable
-		 * without it. Btw, the lack of flush for the posted pci
-		 * write is safe - FR
+
+		/* We need for force the visibility of tp->intr_mask
+		 * for other CPUs, as we can loose an MSI interrupt
+		 * and potentially wait for a retransmit timeout if we don't.
+		 * The posted write to IntrMask is safe, as it will
+		 * eventually make it to the chip and we won't loose anything
+		 * until it does.
 		 */
+		tp->intr_mask = 0xffff;
 		smp_wmb();
 		RTL_W16(IntrMask, tp->intr_event);
 	}
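This hunk pairs with the smp_rmb() added in the interrupt handler above: rtl8169_poll() must make the new tp->intr_mask value visible before the write that re-enables interrupts, or an interrupt taken on another CPU could still see the stale mask and ignore the event. The sketch below is a rough user-space analogue under the assumption that C11 release/acquire fences model the kernel's smp_wmb()/smp_rmb(); the variable names are illustrative, not the driver's fields.

/* Rough user-space analogue of the tp->intr_mask publication in
 * rtl8169_poll()/rtl8169_interrupt(). C11 release/acquire fences stand
 * in for the kernel's smp_wmb()/smp_rmb(); the variables are
 * illustrative, not the driver's own fields.
 */
#include <stdatomic.h>
#include <stdio.h>

static atomic_uint intr_mask;		/* models tp->intr_mask          */
static atomic_uint irq_enabled;		/* models the IntrMask register  */

/* NAPI poll path: publish the unmasked state, then re-enable the irq. */
static void poll_done(void)
{
	atomic_store_explicit(&intr_mask, 0xffff, memory_order_relaxed);
	atomic_thread_fence(memory_order_release);	/* ~ smp_wmb() */
	atomic_store_explicit(&irq_enabled, 1, memory_order_relaxed);
}

/* Interrupt path: once the irq is seen as enabled, make sure the mask
 * written before it was re-enabled is also visible, otherwise a stale
 * mask of 0 would make us ignore the event and wait for one that may
 * never come.
 */
static void irq_handler(unsigned int status)
{
	if (!atomic_load_explicit(&irq_enabled, memory_order_relaxed))
		return;
	atomic_thread_fence(memory_order_acquire);	/* ~ smp_rmb() */
	if (status & atomic_load_explicit(&intr_mask, memory_order_relaxed))
		printf("scheduling NAPI for status %04x\n", status);
}

int main(void)
{
	poll_done();
	irq_handler(0x0001);
	return 0;
}

As the patch comment notes, the posted write to IntrMask itself needs no flush: until it reaches the chip no new interrupt can be generated, so nothing is lost in that window.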