author	Ayaz Abdulla <aabdulla@nvidia.com>	2009-03-05 03:02:22 -0500
committer	David S. Miller <davem@davemloft.net>	2009-03-10 08:29:49 -0400
commit	b67874ac1604cda8070d60d432e892c09d761b2e (patch)
tree	3cc122fa300532f9bcea8bb10027f6e8af8c4238 /drivers/net/forcedeth.c
parent	9e184767c956e71d9535c9fc8433e140f819d07d (diff)
forcedeth: remove isr processing loop
This patch is only a subset of changes so that it is easier to see the
modifications. This patch removes the isr 'for' loop and shifts all the
logic to account for new tab spacing.

Signed-off-by: Ayaz Abdulla <aabdulla@nvidia.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
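To make the shape of the change easier to see before reading the hunks, here is a minimal, self-contained C sketch of the old and new control flow. This is an illustration only, not the driver's code: read_events(), pending_events, and MAX_INTERRUPT_WORK are hypothetical stand-ins for the readl()/writel() handshake on NvRegIrqStatus and the driver's max_interrupt_work limit.

/* Illustrative sketch only -- not forcedeth code. */
#include <stdio.h>

enum irqreturn { IRQ_NONE, IRQ_HANDLED };

#define MAX_INTERRUPT_WORK 4

static unsigned int pending_events = 3;    /* fake interrupt source */

/* Stand-in for reading and acknowledging the irq status register. */
static unsigned int read_events(void)
{
        if (pending_events == 0)
                return 0;
        pending_events--;
        return 1;
}

/* Old shape: drain events in a loop inside the handler. */
static enum irqreturn isr_with_loop(void)
{
        int i;

        for (i = 0; ; i++) {
                if (!read_events())
                        break;                  /* no work left */
                printf("loop: serviced events, pass %d\n", i);
                if (i > MAX_INTERRUPT_WORK)
                        break;                  /* irq-storm guard */
        }
        return i ? IRQ_HANDLED : IRQ_NONE;      /* like IRQ_RETVAL(i) */
}

/* New shape: one pass per interrupt; further events raise a new irq. */
static enum irqreturn isr_single_pass(void)
{
        if (!read_events())
                return IRQ_NONE;                /* not our interrupt */
        printf("single pass: serviced one batch of events\n");
        return IRQ_HANDLED;
}

int main(void)
{
        isr_with_loop();
        pending_events = 3;
        isr_single_pass();
        return 0;
}

The hunks below make exactly this move twice, once for nv_nic_irq() and once for nv_nic_irq_optimized(): the iteration counter, the max_interrupt_work guard, and the "too many iterations" printk disappear, a status read with no pending events returns IRQ_NONE immediately, and the handler ends with a plain IRQ_HANDLED instead of IRQ_RETVAL(i).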
Diffstat (limited to 'drivers/net/forcedeth.c')
-rw-r--r--	drivers/net/forcedeth.c | 252
1 file changed, 105 insertions(+), 147 deletions(-)
diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c
index 07e245dab680..2c3a7f851f20 100644
--- a/drivers/net/forcedeth.c
+++ b/drivers/net/forcedeth.c
@@ -3423,99 +3423,78 @@ static irqreturn_t nv_nic_irq(int foo, void *data)
 	struct net_device *dev = (struct net_device *) data;
 	struct fe_priv *np = netdev_priv(dev);
 	u8 __iomem *base = get_hwbase(dev);
-	int i;
 
 	dprintk(KERN_DEBUG "%s: nv_nic_irq\n", dev->name);
 
-	for (i=0; ; i++) {
-		if (!(np->msi_flags & NV_MSI_X_ENABLED)) {
-			np->events = readl(base + NvRegIrqStatus);
-			writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
-		} else {
-			np->events = readl(base + NvRegMSIXIrqStatus);
-			writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus);
-		}
-		dprintk(KERN_DEBUG "%s: irq: %08x\n", dev->name, np->events);
-		if (!(np->events & np->irqmask))
-			break;
+	if (!(np->msi_flags & NV_MSI_X_ENABLED)) {
+		np->events = readl(base + NvRegIrqStatus);
+		writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
+	} else {
+		np->events = readl(base + NvRegMSIXIrqStatus);
+		writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus);
+	}
+	dprintk(KERN_DEBUG "%s: irq: %08x\n", dev->name, np->events);
+	if (!(np->events & np->irqmask))
+		return IRQ_NONE;
 
-		nv_msi_workaround(np);
+	nv_msi_workaround(np);
 
 #ifdef CONFIG_FORCEDETH_NAPI
-		spin_lock(&np->lock);
-		napi_schedule(&np->napi);
+	spin_lock(&np->lock);
+	napi_schedule(&np->napi);
 
-		/* Disable furthur irq's
-		   (msix not enabled with napi) */
-		writel(0, base + NvRegIrqMask);
+	/* Disable furthur irq's
+	   (msix not enabled with napi) */
+	writel(0, base + NvRegIrqMask);
 
-		spin_unlock(&np->lock);
+	spin_unlock(&np->lock);
 
-		return IRQ_HANDLED;
+	return IRQ_HANDLED;
 #else
-		spin_lock(&np->lock);
-		nv_tx_done(dev, np->tx_ring_size);
-		spin_unlock(&np->lock);
-
-		if (nv_rx_process(dev, RX_WORK_PER_LOOP)) {
-			if (unlikely(nv_alloc_rx(dev))) {
-				spin_lock(&np->lock);
-				if (!np->in_shutdown)
-					mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
-				spin_unlock(&np->lock);
-			}
-		}
-
-		if (unlikely(np->events & NVREG_IRQ_LINK)) {
-			spin_lock(&np->lock);
-			nv_link_irq(dev);
-			spin_unlock(&np->lock);
-		}
-		if (unlikely(np->need_linktimer && time_after(jiffies, np->link_timeout))) {
-			spin_lock(&np->lock);
-			nv_linkchange(dev);
-			spin_unlock(&np->lock);
-			np->link_timeout = jiffies + LINK_TIMEOUT;
-		}
-		if (unlikely(np->events & NVREG_IRQ_RECOVER_ERROR)) {
-			spin_lock(&np->lock);
-			/* disable interrupts on the nic */
-			if (!(np->msi_flags & NV_MSI_X_ENABLED))
-				writel(0, base + NvRegIrqMask);
-			else
-				writel(np->irqmask, base + NvRegIrqMask);
-			pci_push(base);
-
-			if (!np->in_shutdown) {
-				np->nic_poll_irq = np->irqmask;
-				np->recover_error = 1;
-				mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
-			}
-			spin_unlock(&np->lock);
-			break;
-		}
-		if (unlikely(i > max_interrupt_work)) {
-			spin_lock(&np->lock);
-			/* disable interrupts on the nic */
-			if (!(np->msi_flags & NV_MSI_X_ENABLED))
-				writel(0, base + NvRegIrqMask);
-			else
-				writel(np->irqmask, base + NvRegIrqMask);
-			pci_push(base);
-
-			if (!np->in_shutdown) {
-				np->nic_poll_irq = np->irqmask;
-				mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
-			}
-			spin_unlock(&np->lock);
-			printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq.\n", dev->name, i);
-			break;
-		}
+	spin_lock(&np->lock);
+	nv_tx_done(dev, np->tx_ring_size);
+	spin_unlock(&np->lock);
+
+	if (nv_rx_process(dev, RX_WORK_PER_LOOP)) {
+		if (unlikely(nv_alloc_rx(dev))) {
+			spin_lock(&np->lock);
+			if (!np->in_shutdown)
+				mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
+			spin_unlock(&np->lock);
+		}
+	}
+
+	if (unlikely(np->events & NVREG_IRQ_LINK)) {
+		spin_lock(&np->lock);
+		nv_link_irq(dev);
+		spin_unlock(&np->lock);
+	}
+	if (unlikely(np->need_linktimer && time_after(jiffies, np->link_timeout))) {
+		spin_lock(&np->lock);
+		nv_linkchange(dev);
+		spin_unlock(&np->lock);
+		np->link_timeout = jiffies + LINK_TIMEOUT;
+	}
+	if (unlikely(np->events & NVREG_IRQ_RECOVER_ERROR)) {
+		spin_lock(&np->lock);
+		/* disable interrupts on the nic */
+		if (!(np->msi_flags & NV_MSI_X_ENABLED))
+			writel(0, base + NvRegIrqMask);
+		else
+			writel(np->irqmask, base + NvRegIrqMask);
+		pci_push(base);
+
+		if (!np->in_shutdown) {
+			np->nic_poll_irq = np->irqmask;
+			np->recover_error = 1;
+			mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
+		}
+		spin_unlock(&np->lock);
+	}
 #endif
-	}
 	dprintk(KERN_DEBUG "%s: nv_nic_irq completed\n", dev->name);
 
-	return IRQ_RETVAL(i);
+	return IRQ_HANDLED;
 }
 
 /**
@@ -3528,100 +3507,79 @@ static irqreturn_t nv_nic_irq_optimized(int foo, void *data)
 	struct net_device *dev = (struct net_device *) data;
 	struct fe_priv *np = netdev_priv(dev);
 	u8 __iomem *base = get_hwbase(dev);
-	int i;
 
 	dprintk(KERN_DEBUG "%s: nv_nic_irq_optimized\n", dev->name);
 
-	for (i=0; ; i++) {
-		if (!(np->msi_flags & NV_MSI_X_ENABLED)) {
-			np->events = readl(base + NvRegIrqStatus);
-			writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
-		} else {
-			np->events = readl(base + NvRegMSIXIrqStatus);
-			writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus);
-		}
-		dprintk(KERN_DEBUG "%s: irq: %08x\n", dev->name, np->events);
-		if (!(np->events & np->irqmask))
-			break;
+	if (!(np->msi_flags & NV_MSI_X_ENABLED)) {
+		np->events = readl(base + NvRegIrqStatus);
+		writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
+	} else {
+		np->events = readl(base + NvRegMSIXIrqStatus);
+		writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus);
+	}
+	dprintk(KERN_DEBUG "%s: irq: %08x\n", dev->name, np->events);
+	if (!(np->events & np->irqmask))
+		return IRQ_NONE;
 
-		nv_msi_workaround(np);
+	nv_msi_workaround(np);
 
 #ifdef CONFIG_FORCEDETH_NAPI
-		spin_lock(&np->lock);
-		napi_schedule(&np->napi);
+	spin_lock(&np->lock);
+	napi_schedule(&np->napi);
 
-		/* Disable furthur irq's
-		   (msix not enabled with napi) */
-		writel(0, base + NvRegIrqMask);
+	/* Disable furthur irq's
+	   (msix not enabled with napi) */
+	writel(0, base + NvRegIrqMask);
 
-		spin_unlock(&np->lock);
+	spin_unlock(&np->lock);
 
-		return IRQ_HANDLED;
+	return IRQ_HANDLED;
 #else
-		spin_lock(&np->lock);
-		nv_tx_done_optimized(dev, TX_WORK_PER_LOOP);
-		spin_unlock(&np->lock);
-
-		if (nv_rx_process_optimized(dev, RX_WORK_PER_LOOP)) {
-			if (unlikely(nv_alloc_rx_optimized(dev))) {
-				spin_lock(&np->lock);
-				if (!np->in_shutdown)
-					mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
-				spin_unlock(&np->lock);
-			}
-		}
-
-		if (unlikely(np->events & NVREG_IRQ_LINK)) {
-			spin_lock(&np->lock);
-			nv_link_irq(dev);
-			spin_unlock(&np->lock);
-		}
-		if (unlikely(np->need_linktimer && time_after(jiffies, np->link_timeout))) {
-			spin_lock(&np->lock);
-			nv_linkchange(dev);
-			spin_unlock(&np->lock);
-			np->link_timeout = jiffies + LINK_TIMEOUT;
-		}
-		if (unlikely(np->events & NVREG_IRQ_RECOVER_ERROR)) {
-			spin_lock(&np->lock);
-			/* disable interrupts on the nic */
-			if (!(np->msi_flags & NV_MSI_X_ENABLED))
-				writel(0, base + NvRegIrqMask);
-			else
-				writel(np->irqmask, base + NvRegIrqMask);
-			pci_push(base);
-
-			if (!np->in_shutdown) {
-				np->nic_poll_irq = np->irqmask;
-				np->recover_error = 1;
-				mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
-			}
-			spin_unlock(&np->lock);
-			break;
-		}
-
-		if (unlikely(i > max_interrupt_work)) {
-			spin_lock(&np->lock);
-			/* disable interrupts on the nic */
-			if (!(np->msi_flags & NV_MSI_X_ENABLED))
-				writel(0, base + NvRegIrqMask);
-			else
-				writel(np->irqmask, base + NvRegIrqMask);
-			pci_push(base);
-
-			if (!np->in_shutdown) {
-				np->nic_poll_irq = np->irqmask;
-				mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
-			}
-			spin_unlock(&np->lock);
-			printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq.\n", dev->name, i);
-			break;
-		}
+	spin_lock(&np->lock);
+	nv_tx_done_optimized(dev, TX_WORK_PER_LOOP);
+	spin_unlock(&np->lock);
+
+	if (nv_rx_process_optimized(dev, RX_WORK_PER_LOOP)) {
+		if (unlikely(nv_alloc_rx_optimized(dev))) {
+			spin_lock(&np->lock);
+			if (!np->in_shutdown)
+				mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
+			spin_unlock(&np->lock);
+		}
+	}
+
+	if (unlikely(np->events & NVREG_IRQ_LINK)) {
+		spin_lock(&np->lock);
+		nv_link_irq(dev);
+		spin_unlock(&np->lock);
+	}
+	if (unlikely(np->need_linktimer && time_after(jiffies, np->link_timeout))) {
+		spin_lock(&np->lock);
+		nv_linkchange(dev);
+		spin_unlock(&np->lock);
+		np->link_timeout = jiffies + LINK_TIMEOUT;
+	}
+	if (unlikely(np->events & NVREG_IRQ_RECOVER_ERROR)) {
+		spin_lock(&np->lock);
+		/* disable interrupts on the nic */
+		if (!(np->msi_flags & NV_MSI_X_ENABLED))
+			writel(0, base + NvRegIrqMask);
+		else
+			writel(np->irqmask, base + NvRegIrqMask);
+		pci_push(base);
+
+		if (!np->in_shutdown) {
+			np->nic_poll_irq = np->irqmask;
+			np->recover_error = 1;
+			mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
+		}
+		spin_unlock(&np->lock);
+	}
+
 #endif
-	}
 	dprintk(KERN_DEBUG "%s: nv_nic_irq_optimized completed\n", dev->name);
 
-	return IRQ_RETVAL(i);
+	return IRQ_HANDLED;
 }
 
 static irqreturn_t nv_nic_irq_tx(int foo, void *data)