author	Ayaz Abdulla <aabdulla@nvidia.com>	2009-03-05 03:02:14 -0500
committer	David S. Miller <davem@davemloft.net>	2009-03-10 08:29:48 -0400
commit	f27e6f39fc37f378f5485b9a0ff905796e789f80
tree	0f0632bc4a8a1f92daa432cfae36df0355494856
parent	33912e72d00c3627dbbb7c59463df9535176059f
forcedeth: napi - handle all processing
The napi poll routine has been modified to handle all interrupt events and process them accordingly. Therefore, the ISR will now only schedule the napi poll and disable all interrupts, instead of just disabling the rx interrupt.

Signed-off-by: Ayaz Abdulla <aabdulla@nvidia.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
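The change follows the standard NAPI design: the hard interrupt handler does no packet work itself; it schedules the poll routine and masks every interrupt source, and the poll routine then performs tx completion, rx processing, and event handling before unmasking. As a minimal sketch of the ISR shape this patch moves to (condensed from the hunks below, with the event latching and MSI workaround elided; all identifiers are the driver's own):

static irqreturn_t nv_nic_irq(int foo, void *data)
{
	struct net_device *dev = (struct net_device *)data;
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);

	/* ... latch np->events and apply nv_msi_workaround(np) ... */

	spin_lock(&np->lock);
	napi_schedule(&np->napi);

	/* mask ALL device interrupts, not just RX: tx completion and
	 * link events are now handled in nv_napi_poll() as well */
	writel(0, base + NvRegIrqMask);
	spin_unlock(&np->lock);

	return IRQ_HANDLED;
}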
Diffstat (limited to 'drivers')
-rw-r--r--	drivers/net/forcedeth.c	98
1 file changed, 60 insertions(+), 38 deletions(-)
diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c
index 78c2fe185281..6b6765431eaf 100644
--- a/drivers/net/forcedeth.c
+++ b/drivers/net/forcedeth.c
@@ -3440,25 +3440,22 @@ static irqreturn_t nv_nic_irq(int foo, void *data)
 
 	nv_msi_workaround(np);
 
-	spin_lock(&np->lock);
-	nv_tx_done(dev, np->tx_ring_size);
-	spin_unlock(&np->lock);
-
 #ifdef CONFIG_FORCEDETH_NAPI
-	if (np->events & NVREG_IRQ_RX_ALL) {
-		spin_lock(&np->lock);
-		napi_schedule(&np->napi);
+	spin_lock(&np->lock);
+	napi_schedule(&np->napi);
 
-		/* Disable furthur receive irq's */
-		np->irqmask &= ~NVREG_IRQ_RX_ALL;
+	/* Disable furthur irq's
+	   (msix not enabled with napi) */
+	writel(0, base + NvRegIrqMask);
 
-		if (np->msi_flags & NV_MSI_X_ENABLED)
-			writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask);
-		else
-			writel(np->irqmask, base + NvRegIrqMask);
-		spin_unlock(&np->lock);
-	}
+	spin_unlock(&np->lock);
+
+	return IRQ_HANDLED;
 #else
+	spin_lock(&np->lock);
+	nv_tx_done(dev, np->tx_ring_size);
+	spin_unlock(&np->lock);
+
 	if (nv_rx_process(dev, RX_WORK_PER_LOOP)) {
 		if (unlikely(nv_alloc_rx(dev))) {
 			spin_lock(&np->lock);
@@ -3467,7 +3464,7 @@ static irqreturn_t nv_nic_irq(int foo, void *data)
 			spin_unlock(&np->lock);
 		}
 	}
-#endif
+
 	if (unlikely(np->events & NVREG_IRQ_LINK)) {
 		spin_lock(&np->lock);
 		nv_link_irq(dev);
@@ -3513,7 +3510,7 @@ static irqreturn_t nv_nic_irq(int foo, void *data)
 			printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq.\n", dev->name, i);
 			break;
 		}
-
+#endif
 	}
 	dprintk(KERN_DEBUG "%s: nv_nic_irq completed\n", dev->name);
 
@@ -3548,25 +3545,22 @@ static irqreturn_t nv_nic_irq_optimized(int foo, void *data)
 
 	nv_msi_workaround(np);
 
-	spin_lock(&np->lock);
-	nv_tx_done_optimized(dev, TX_WORK_PER_LOOP);
-	spin_unlock(&np->lock);
-
 #ifdef CONFIG_FORCEDETH_NAPI
-	if (np->events & NVREG_IRQ_RX_ALL) {
-		spin_lock(&np->lock);
-		napi_schedule(&np->napi);
+	spin_lock(&np->lock);
+	napi_schedule(&np->napi);
 
-		/* Disable furthur receive irq's */
-		np->irqmask &= ~NVREG_IRQ_RX_ALL;
+	/* Disable furthur irq's
+	   (msix not enabled with napi) */
+	writel(0, base + NvRegIrqMask);
 
-		if (np->msi_flags & NV_MSI_X_ENABLED)
-			writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask);
-		else
-			writel(np->irqmask, base + NvRegIrqMask);
-		spin_unlock(&np->lock);
-	}
+	spin_unlock(&np->lock);
+
+	return IRQ_HANDLED;
 #else
+	spin_lock(&np->lock);
+	nv_tx_done_optimized(dev, TX_WORK_PER_LOOP);
+	spin_unlock(&np->lock);
+
 	if (nv_rx_process_optimized(dev, RX_WORK_PER_LOOP)) {
 		if (unlikely(nv_alloc_rx_optimized(dev))) {
 			spin_lock(&np->lock);
@@ -3575,7 +3569,7 @@ static irqreturn_t nv_nic_irq_optimized(int foo, void *data)
 			spin_unlock(&np->lock);
 		}
 	}
-#endif
+
 	if (unlikely(np->events & NVREG_IRQ_LINK)) {
 		spin_lock(&np->lock);
 		nv_link_irq(dev);
@@ -3622,7 +3616,7 @@ static irqreturn_t nv_nic_irq_optimized(int foo, void *data)
 			printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq.\n", dev->name, i);
 			break;
 		}
-
+#endif
 	}
 	dprintk(KERN_DEBUG "%s: nv_nic_irq_optimized completed\n", dev->name);
 
@@ -3682,9 +3676,17 @@ static int nv_napi_poll(struct napi_struct *napi, int budget)
 	int pkts, retcode;
 
 	if (!nv_optimized(np)) {
+		spin_lock_irqsave(&np->lock, flags);
+		nv_tx_done(dev, np->tx_ring_size);
+		spin_unlock_irqrestore(&np->lock, flags);
+
 		pkts = nv_rx_process(dev, budget);
 		retcode = nv_alloc_rx(dev);
 	} else {
+		spin_lock_irqsave(&np->lock, flags);
+		nv_tx_done_optimized(dev, np->tx_ring_size);
+		spin_unlock_irqrestore(&np->lock, flags);
+
 		pkts = nv_rx_process_optimized(dev, budget);
 		retcode = nv_alloc_rx_optimized(dev);
 	}
@@ -3696,17 +3698,37 @@ static int nv_napi_poll(struct napi_struct *napi, int budget)
 		spin_unlock_irqrestore(&np->lock, flags);
 	}
 
+	if (unlikely(np->events & NVREG_IRQ_LINK)) {
+		spin_lock_irqsave(&np->lock, flags);
+		nv_link_irq(dev);
+		spin_unlock_irqrestore(&np->lock, flags);
+	}
+	if (unlikely(np->need_linktimer && time_after(jiffies, np->link_timeout))) {
+		spin_lock_irqsave(&np->lock, flags);
+		nv_linkchange(dev);
+		spin_unlock_irqrestore(&np->lock, flags);
+		np->link_timeout = jiffies + LINK_TIMEOUT;
+	}
+	if (unlikely(np->events & NVREG_IRQ_RECOVER_ERROR)) {
+		spin_lock_irqsave(&np->lock, flags);
+		if (!np->in_shutdown) {
+			np->nic_poll_irq = np->irqmask;
+			np->recover_error = 1;
+			mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
+		}
+		spin_unlock_irqrestore(&np->lock, flags);
+		__napi_complete(napi);
+		return pkts;
+	}
+
 	if (pkts < budget) {
-		/* re-enable receive interrupts */
+		/* re-enable interrupts
+		   (msix not enabled in napi) */
 		spin_lock_irqsave(&np->lock, flags);
 
 		__napi_complete(napi);
 
-		np->irqmask |= NVREG_IRQ_RX_ALL;
-		if (np->msi_flags & NV_MSI_X_ENABLED)
-			writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask);
-		else
-			writel(np->irqmask, base + NvRegIrqMask);
+		writel(np->irqmask, base + NvRegIrqMask);
 
 		spin_unlock_irqrestore(&np->lock, flags);
 	}
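The ordering in the last hunk is the subtle part: __napi_complete() and the irq unmask sit in one critical section under np->lock, so the ISR (which takes the same lock) cannot slip in between, find NAPI still marked scheduled, and leave the device unmasked with no poll pending. Condensed, the completion path now reads as below; this is a sketch, and the recover-error early return above follows the same rule by completing NAPI before returning:

	if (pkts < budget) {
		spin_lock_irqsave(&np->lock, flags);

		/* clear NAPI_STATE_SCHED before unmasking: any irq that
		 * fires after the writel() sees NAPI idle and reschedules */
		__napi_complete(napi);
		writel(np->irqmask, base + NvRegIrqMask);

		spin_unlock_irqrestore(&np->lock, flags);
	}
	return pkts;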