author     Ayaz Abdulla <aabdulla@nvidia.com>     2009-03-05 03:02:26 -0500
committer  David S. Miller <davem@davemloft.net>  2009-03-10 08:29:49 -0400
commit     4145ade2bb265b34331265bfa2221e40b069b3ca (patch)
tree       5f1cf1c04fe11d083a362d614747c28134d08e82 /drivers/net/forcedeth.c
parent     b67874ac1604cda8070d60d432e892c09d761b2e (diff)
forcedeth: add interrupt moderation logic
This patch adds the logic to moderate the interrupts by changing the mode between throughput and poll. If there has been a long period without any burst of network load, the code transitions to pure throughput mode (where each tx/rx/other event causes an interrupt). If bursts of network load occur, it transitions to poll based mode to help reduce cpu utilization (it does not interrupt on each packet) while maintaining optimum network bandwidth utilization.

Signed-off-by: Ayaz Abdulla <aabdulla@nvidia.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
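As a rough, self-contained sketch of the hysteresis this describes (user-space C, not the driver code: the names change_interrupt_mode, irq_mode, and moderation_state are invented for illustration, the optimization_mode == NV_OPTIMIZATION_MODE_DYNAMIC guard is omitted, and the two constants simply mirror NV_DYNAMIC_THRESHOLD and NV_DYNAMIC_MAX_QUIET_COUNT from the patch below):

/*
 * Standalone sketch of the dynamic moderation decision.
 * Hypothetical names; constants mirror the patch.
 */
#include <stdio.h>

#define DYNAMIC_THRESHOLD        4     /* work per interrupt that counts as a burst */
#define DYNAMIC_MAX_QUIET_COUNT  2048  /* quiet interrupts required before throughput mode */

enum irq_mode { MODE_THROUGHPUT, MODE_POLL };

struct moderation_state {
        enum irq_mode mode;
        int quiet_count;
};

/* Returns 1 if the interrupt mask needs to be reprogrammed, 0 otherwise. */
static int change_interrupt_mode(struct moderation_state *s, int total_work)
{
        if (total_work > DYNAMIC_THRESHOLD) {
                /* burst of traffic: switch to poll based interrupts */
                s->quiet_count = 0;
                if (s->mode != MODE_POLL) {
                        s->mode = MODE_POLL;
                        return 1;
                }
        } else if (s->quiet_count < DYNAMIC_MAX_QUIET_COUNT) {
                s->quiet_count++;
        } else if (s->mode != MODE_THROUGHPUT) {
                /* long quiet period: interrupt per tx/rx packet again */
                s->mode = MODE_THROUGHPUT;
                return 1;
        }
        return 0;
}

int main(void)
{
        struct moderation_state s = { MODE_THROUGHPUT, 0 };
        int i;

        /* one busy interrupt (10 units of work) is enough to enter poll mode */
        if (change_interrupt_mode(&s, 10))
                printf("switched to poll mode\n");

        /* only a long run of quiet interrupts switches back */
        for (i = 0; i <= DYNAMIC_MAX_QUIET_COUNT; i++)
                if (change_interrupt_mode(&s, 1))
                        printf("back to throughput mode after %d quiet interrupts\n", i + 1);

        return 0;
}

The asymmetry is deliberate: a single interrupt that completes more than NV_DYNAMIC_THRESHOLD units of work is enough to drop into poll mode, while NV_DYNAMIC_MAX_QUIET_COUNT consecutive quiet interrupts must pass before the driver returns to per-packet (throughput) interrupts.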
Diffstat (limited to 'drivers/net/forcedeth.c')
-rw-r--r--  drivers/net/forcedeth.c  137
1 file changed, 106 insertions(+), 31 deletions(-)
diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c
index 2c3a7f851f20..341a35117eae 100644
--- a/drivers/net/forcedeth.c
+++ b/drivers/net/forcedeth.c
@@ -593,6 +593,9 @@ union ring_type {
 
 #define NV_TX_LIMIT_COUNT     16
 
+#define NV_DYNAMIC_THRESHOLD        4
+#define NV_DYNAMIC_MAX_QUIET_COUNT  2048
+
 /* statistics */
 struct nv_ethtool_str {
         char name[ETH_GSTRING_LEN];
@@ -750,6 +753,7 @@ struct fe_priv {
         u16 gigabit;
         int intr_test;
         int recover_error;
+        int quiet_count;
 
         /* General data: RO fields */
         dma_addr_t ring_addr;
@@ -832,7 +836,7 @@ struct fe_priv {
  * Maximum number of loops until we assume that a bit in the irq mask
  * is stuck. Overridable with module param.
  */
-static int max_interrupt_work = 15;
+static int max_interrupt_work = 4;
 
 /*
  * Optimization can be either throuput mode or cpu mode
@@ -3418,11 +3422,43 @@ static void nv_msi_workaround(struct fe_priv *np)
         }
 }
 
+static inline int nv_change_interrupt_mode(struct net_device *dev, int total_work)
+{
+        struct fe_priv *np = netdev_priv(dev);
+
+        if (optimization_mode == NV_OPTIMIZATION_MODE_DYNAMIC) {
+                if (total_work > NV_DYNAMIC_THRESHOLD) {
+                        /* transition to poll based interrupts */
+                        np->quiet_count = 0;
+                        if (np->irqmask != NVREG_IRQMASK_CPU) {
+                                np->irqmask = NVREG_IRQMASK_CPU;
+                                return 1;
+                        }
+                } else {
+                        if (np->quiet_count < NV_DYNAMIC_MAX_QUIET_COUNT) {
+                                np->quiet_count++;
+                        } else {
+                                /* reached a period of low activity, switch
+                                   to per tx/rx packet interrupts */
+                                if (np->irqmask != NVREG_IRQMASK_THROUGHPUT) {
+                                        np->irqmask = NVREG_IRQMASK_THROUGHPUT;
+                                        return 1;
+                                }
+                        }
+                }
+        }
+        return 0;
+}
+
 static irqreturn_t nv_nic_irq(int foo, void *data)
 {
         struct net_device *dev = (struct net_device *) data;
         struct fe_priv *np = netdev_priv(dev);
         u8 __iomem *base = get_hwbase(dev);
+#ifndef CONFIG_FORCEDETH_NAPI
+        int total_work = 0;
+        int loop_count = 0;
+#endif
 
         dprintk(KERN_DEBUG "%s: nv_nic_irq\n", dev->name);
 
@@ -3449,19 +3485,35 @@ static irqreturn_t nv_nic_irq(int foo, void *data)
 
         spin_unlock(&np->lock);
 
-        return IRQ_HANDLED;
 #else
-        spin_lock(&np->lock);
-        nv_tx_done(dev, np->tx_ring_size);
-        spin_unlock(&np->lock);
-
-        if (nv_rx_process(dev, RX_WORK_PER_LOOP)) {
-                if (unlikely(nv_alloc_rx(dev))) {
-                        spin_lock(&np->lock);
-                        if (!np->in_shutdown)
-                                mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
-                        spin_unlock(&np->lock);
+        do
+        {
+                int work = 0;
+                if ((work = nv_rx_process(dev, RX_WORK_PER_LOOP))) {
+                        if (unlikely(nv_alloc_rx(dev))) {
+                                spin_lock(&np->lock);
+                                if (!np->in_shutdown)
+                                        mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
+                                spin_unlock(&np->lock);
+                        }
                 }
+
+                spin_lock(&np->lock);
+                work += nv_tx_done(dev, TX_WORK_PER_LOOP);
+                spin_unlock(&np->lock);
+
+                if (!work)
+                        break;
+
+                total_work += work;
+
+                loop_count++;
+        }
+        while (loop_count < max_interrupt_work);
+
+        if (nv_change_interrupt_mode(dev, total_work)) {
+                /* setup new irq mask */
+                writel(np->irqmask, base + NvRegIrqMask);
         }
 
         if (unlikely(np->events & NVREG_IRQ_LINK)) {
@@ -3507,6 +3559,10 @@ static irqreturn_t nv_nic_irq_optimized(int foo, void *data)
         struct net_device *dev = (struct net_device *) data;
         struct fe_priv *np = netdev_priv(dev);
         u8 __iomem *base = get_hwbase(dev);
+#ifndef CONFIG_FORCEDETH_NAPI
+        int total_work = 0;
+        int loop_count = 0;
+#endif
 
         dprintk(KERN_DEBUG "%s: nv_nic_irq_optimized\n", dev->name);
 
@@ -3533,19 +3589,35 @@ static irqreturn_t nv_nic_irq_optimized(int foo, void *data)
 
         spin_unlock(&np->lock);
 
-        return IRQ_HANDLED;
 #else
-        spin_lock(&np->lock);
-        nv_tx_done_optimized(dev, TX_WORK_PER_LOOP);
-        spin_unlock(&np->lock);
-
-        if (nv_rx_process_optimized(dev, RX_WORK_PER_LOOP)) {
-                if (unlikely(nv_alloc_rx_optimized(dev))) {
-                        spin_lock(&np->lock);
-                        if (!np->in_shutdown)
-                                mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
-                        spin_unlock(&np->lock);
+        do
+        {
+                int work = 0;
+                if ((work = nv_rx_process_optimized(dev, RX_WORK_PER_LOOP))) {
+                        if (unlikely(nv_alloc_rx_optimized(dev))) {
+                                spin_lock(&np->lock);
+                                if (!np->in_shutdown)
+                                        mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
+                                spin_unlock(&np->lock);
+                        }
                 }
+
+                spin_lock(&np->lock);
+                work += nv_tx_done_optimized(dev, TX_WORK_PER_LOOP);
+                spin_unlock(&np->lock);
+
+                if (!work)
+                        break;
+
+                total_work += work;
+
+                loop_count++;
+        }
+        while (loop_count < max_interrupt_work);
+
+        if (nv_change_interrupt_mode(dev, total_work)) {
+                /* setup new irq mask */
+                writel(np->irqmask, base + NvRegIrqMask);
         }
 
         if (unlikely(np->events & NVREG_IRQ_LINK)) {
@@ -3632,21 +3704,22 @@ static int nv_napi_poll(struct napi_struct *napi, int budget)
         struct net_device *dev = np->dev;
         u8 __iomem *base = get_hwbase(dev);
         unsigned long flags;
-        int pkts, retcode;
+        int retcode;
+        int tx_work, rx_work;
 
         if (!nv_optimized(np)) {
                 spin_lock_irqsave(&np->lock, flags);
-                nv_tx_done(dev, np->tx_ring_size);
+                tx_work = nv_tx_done(dev, np->tx_ring_size);
                 spin_unlock_irqrestore(&np->lock, flags);
 
-                pkts = nv_rx_process(dev, budget);
+                rx_work = nv_rx_process(dev, budget);
                 retcode = nv_alloc_rx(dev);
         } else {
                 spin_lock_irqsave(&np->lock, flags);
-                nv_tx_done_optimized(dev, np->tx_ring_size);
+                tx_work = nv_tx_done_optimized(dev, np->tx_ring_size);
                 spin_unlock_irqrestore(&np->lock, flags);
 
-                pkts = nv_rx_process_optimized(dev, budget);
+                rx_work = nv_rx_process_optimized(dev, budget);
                 retcode = nv_alloc_rx_optimized(dev);
         }
 
@@ -3657,6 +3730,8 @@ static int nv_napi_poll(struct napi_struct *napi, int budget)
                 spin_unlock_irqrestore(&np->lock, flags);
         }
 
+        nv_change_interrupt_mode(dev, tx_work + rx_work);
+
         if (unlikely(np->events & NVREG_IRQ_LINK)) {
                 spin_lock_irqsave(&np->lock, flags);
                 nv_link_irq(dev);
@@ -3677,10 +3752,10 @@ static int nv_napi_poll(struct napi_struct *napi, int budget)
                 }
                 spin_unlock_irqrestore(&np->lock, flags);
                 __napi_complete(napi);
-                return pkts;
+                return rx_work;
         }
 
-        if (pkts < budget) {
+        if (rx_work < budget) {
                 /* re-enable interrupts
                    (msix not enabled in napi) */
                 spin_lock_irqsave(&np->lock, flags);
@@ -3691,7 +3766,7 @@ static int nv_napi_poll(struct napi_struct *napi, int budget)
 
                 spin_unlock_irqrestore(&np->lock, flags);
         }
-        return pkts;
+        return rx_work;
 }
 #endif
 