Diffstat (limited to 'drivers/net/forcedeth.c')
 drivers/net/forcedeth.c | 251 ++++--------------------------------------
 1 file changed, 33 insertions(+), 218 deletions(-)

diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c
index 5c98f7c22425..268ea4d566d7 100644
--- a/drivers/net/forcedeth.c
+++ b/drivers/net/forcedeth.c
@@ -1104,20 +1104,16 @@ static void nv_disable_hw_interrupts(struct net_device *dev, u32 mask)
 
 static void nv_napi_enable(struct net_device *dev)
 {
-#ifdef CONFIG_FORCEDETH_NAPI
 	struct fe_priv *np = get_nvpriv(dev);
 
 	napi_enable(&np->napi);
-#endif
 }
 
 static void nv_napi_disable(struct net_device *dev)
 {
-#ifdef CONFIG_FORCEDETH_NAPI
 	struct fe_priv *np = get_nvpriv(dev);
 
 	napi_disable(&np->napi);
-#endif
 }
 
 #define MII_READ (-1)
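With CONFIG_FORCEDETH_NAPI removed, the NAPI context always exists, so the enable/disable helpers need no guards. For reference, a minimal sketch of the lifecycle a NAPI driver follows (the my_* names are illustrative, not forcedeth's):

#include <linux/netdevice.h>

struct my_priv {
	struct napi_struct napi;
};

static int my_poll(struct napi_struct *napi, int budget);

static void my_probe_setup(struct net_device *dev, struct my_priv *priv)
{
	/* registered once at probe time; weight = per-poll rx budget */
	netif_napi_add(dev, &priv->napi, my_poll, 64);
}

static int my_open(struct net_device *dev)
{
	struct my_priv *priv = netdev_priv(dev);

	napi_enable(&priv->napi);	/* enable polling on open */
	return 0;
}

static int my_stop(struct net_device *dev)
{
	struct my_priv *priv = netdev_priv(dev);

	napi_disable(&priv->napi);	/* waits for any in-flight poll */
	return 0;
}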
@@ -1810,7 +1806,6 @@ static int nv_alloc_rx_optimized(struct net_device *dev)
 }
 
 /* If rx bufs are exhausted called after 50ms to attempt to refresh */
-#ifdef CONFIG_FORCEDETH_NAPI
 static void nv_do_rx_refill(unsigned long data)
 {
 	struct net_device *dev = (struct net_device *) data;
@@ -1819,41 +1814,6 @@ static void nv_do_rx_refill(unsigned long data)
 	/* Just reschedule NAPI rx processing */
 	napi_schedule(&np->napi);
 }
-#else
-static void nv_do_rx_refill(unsigned long data)
-{
-	struct net_device *dev = (struct net_device *) data;
-	struct fe_priv *np = netdev_priv(dev);
-	int retcode;
-
-	if (!using_multi_irqs(dev)) {
-		if (np->msi_flags & NV_MSI_X_ENABLED)
-			disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
-		else
-			disable_irq(np->pci_dev->irq);
-	} else {
-		disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
-	}
-	if (!nv_optimized(np))
-		retcode = nv_alloc_rx(dev);
-	else
-		retcode = nv_alloc_rx_optimized(dev);
-	if (retcode) {
-		spin_lock_irq(&np->lock);
-		if (!np->in_shutdown)
-			mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
-		spin_unlock_irq(&np->lock);
-	}
-	if (!using_multi_irqs(dev)) {
-		if (np->msi_flags & NV_MSI_X_ENABLED)
-			enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
-		else
-			enable_irq(np->pci_dev->irq);
-	} else {
-		enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
-	}
-}
-#endif
 
 static void nv_init_rx(struct net_device *dev)
 {
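The timer-based refill path collapses to a NAPI kick because the poll routine now owns ring refilling. A minimal sketch of the resulting pattern, assuming a timer armed with OOM_REFILL as in the deleted branch (illustrative names):

/* Sketch only: rx-exhaustion timer under the always-NAPI design.
 * Allocation is retried from the poll handler in softirq context,
 * so the disable_irq()/enable_irq() bracketing above goes away. */
static void my_rx_refill(unsigned long data)
{
	struct net_device *dev = (struct net_device *)data;
	struct my_priv *priv = netdev_priv(dev);

	/* run my_poll() soon; it will refill the rx ring */
	napi_schedule(&priv->napi);
}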
@@ -2148,7 +2108,7 @@ static netdev_tx_t nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	unsigned int i;
 	u32 offset = 0;
 	u32 bcnt;
-	u32 size = skb->len-skb->data_len;
+	u32 size = skb_headlen(skb);
 	u32 entries = (size >> NV_TX2_TSO_MAX_SHIFT) + ((size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
 	u32 empty_slots;
 	struct ring_desc* put_tx;
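skb_headlen() is defined as skb->len - skb->data_len, i.e. the bytes in the linear head of the skb as opposed to its paged fragments, so this substitution (here and in nv_start_xmit_optimized below) is purely a readability change. A sketch of the equivalence:

#include <linux/skbuff.h>

/* skb->len      = total payload bytes
 * skb->data_len = bytes held in page fragments
 * skb_headlen() = bytes in the linear buffer, which the tx path
 *                 maps separately from the frags */
static inline u32 linear_bytes(const struct sk_buff *skb)
{
	return skb_headlen(skb);	/* == skb->len - skb->data_len */
}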
@@ -2254,7 +2214,6 @@ static netdev_tx_t nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		dprintk("\n");
 	}
 
-	dev->trans_start = jiffies;
 	writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
 	return NETDEV_TX_OK;
 }
@@ -2269,7 +2228,7 @@ static netdev_tx_t nv_start_xmit_optimized(struct sk_buff *skb,
 	unsigned int i;
 	u32 offset = 0;
 	u32 bcnt;
-	u32 size = skb->len-skb->data_len;
+	u32 size = skb_headlen(skb);
 	u32 entries = (size >> NV_TX2_TSO_MAX_SHIFT) + ((size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
 	u32 empty_slots;
 	struct ring_desc_ex* put_tx;
@@ -2409,7 +2368,6 @@ static netdev_tx_t nv_start_xmit_optimized(struct sk_buff *skb,
 		dprintk("\n");
 	}
 
-	dev->trans_start = jiffies;
 	writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
 	return NETDEV_TX_OK;
 }
@@ -2816,11 +2774,7 @@ static int nv_rx_process(struct net_device *dev, int limit)
 		skb->protocol = eth_type_trans(skb, dev);
 		dprintk(KERN_DEBUG "%s: nv_rx_process: %d bytes, proto %d accepted.\n",
 			dev->name, len, skb->protocol);
-#ifdef CONFIG_FORCEDETH_NAPI
-		netif_receive_skb(skb);
-#else
-		netif_rx(skb);
-#endif
+		napi_gro_receive(&np->napi, skb);
 		dev->stats.rx_packets++;
 		dev->stats.rx_bytes += len;
 next_pkt:
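Handing completed packets to napi_gro_receive() lets the stack coalesce consecutive TCP segments before protocol processing; non-mergeable traffic falls through to the normal receive path, so it is a drop-in for netif_receive_skb() inside a poll routine. A minimal sketch of the hand-off (illustrative names):

static void my_deliver(struct my_priv *priv, struct sk_buff *skb,
		       struct net_device *dev, int len)
{
	skb->protocol = eth_type_trans(skb, dev);
	/* must be called from NAPI (softirq) context */
	napi_gro_receive(&priv->napi, skb);
	dev->stats.rx_packets++;
	dev->stats.rx_bytes += len;
}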
@@ -2909,27 +2863,14 @@ static int nv_rx_process_optimized(struct net_device *dev, int limit)
 			dev->name, len, skb->protocol);
 
 		if (likely(!np->vlangrp)) {
-#ifdef CONFIG_FORCEDETH_NAPI
-			netif_receive_skb(skb);
-#else
-			netif_rx(skb);
-#endif
+			napi_gro_receive(&np->napi, skb);
 		} else {
 			vlanflags = le32_to_cpu(np->get_rx.ex->buflow);
 			if (vlanflags & NV_RX3_VLAN_TAG_PRESENT) {
-#ifdef CONFIG_FORCEDETH_NAPI
-				vlan_hwaccel_receive_skb(skb, np->vlangrp,
-						vlanflags & NV_RX3_VLAN_TAG_MASK);
-#else
-				vlan_hwaccel_rx(skb, np->vlangrp,
-						vlanflags & NV_RX3_VLAN_TAG_MASK);
-#endif
+				vlan_gro_receive(&np->napi, np->vlangrp,
+						 vlanflags & NV_RX3_VLAN_TAG_MASK, skb);
 			} else {
-#ifdef CONFIG_FORCEDETH_NAPI
-				netif_receive_skb(skb);
-#else
-				netif_rx(skb);
-#endif
+				napi_gro_receive(&np->napi, skb);
 			}
 		}
 
@@ -3104,12 +3045,14 @@ static void nv_set_multicast(struct net_device *dev)
 		if (dev->flags & IFF_ALLMULTI) {
 			alwaysOn[0] = alwaysOn[1] = alwaysOff[0] = alwaysOff[1] = 0;
 		} else {
-			struct dev_mc_list *walk;
+			struct netdev_hw_addr *ha;
 
-			netdev_for_each_mc_addr(walk, dev) {
+			netdev_for_each_mc_addr(ha, dev) {
+				unsigned char *addr = ha->addr;
 				u32 a, b;
-				a = le32_to_cpu(*(__le32 *) walk->dmi_addr);
-				b = le16_to_cpu(*(__le16 *) (&walk->dmi_addr[4]));
+
+				a = le32_to_cpu(*(__le32 *) addr);
+				b = le16_to_cpu(*(__le16 *) (&addr[4]));
 				alwaysOn[0] &= a;
 				alwaysOff[0] &= ~a;
 				alwaysOn[1] &= b;
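The loop's effect is unchanged: it folds every multicast address into two masks, alwaysOn (bits set in all addresses) and alwaysOff (bits clear in all addresses), which feed the hardware's match/mask registers; only the list-walking API moves from struct dev_mc_list/dmi_addr to struct netdev_hw_addr/ha->addr. A condensed sketch of the computation, assuming the masks start at all-ones:

/* Sketch: derive common-bit masks from the device multicast list. */
u32 on[2] = { ~0U, ~0U }, off[2] = { ~0U, ~0U };
struct netdev_hw_addr *ha;

netdev_for_each_mc_addr(ha, dev) {
	u32 a = le32_to_cpu(*(__le32 *)ha->addr);	/* bytes 0..3 */
	u32 b = le16_to_cpu(*(__le16 *)(ha->addr + 4));	/* bytes 4..5 */

	on[0] &= a;		/* bit must be 1 in every address */
	off[0] &= ~a;		/* bit must be 0 in every address */
	on[1] &= b;
	off[1] &= ~b;
}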
@@ -3494,10 +3437,6 @@ static irqreturn_t nv_nic_irq(int foo, void *data)
 	struct net_device *dev = (struct net_device *) data;
 	struct fe_priv *np = netdev_priv(dev);
 	u8 __iomem *base = get_hwbase(dev);
-#ifndef CONFIG_FORCEDETH_NAPI
-	int total_work = 0;
-	int loop_count = 0;
-#endif
 
 	dprintk(KERN_DEBUG "%s: nv_nic_irq\n", dev->name);
 
@@ -3514,7 +3453,6 @@ static irqreturn_t nv_nic_irq(int foo, void *data)
 
 	nv_msi_workaround(np);
 
-#ifdef CONFIG_FORCEDETH_NAPI
 	if (napi_schedule_prep(&np->napi)) {
 		/*
 		 * Disable further irq's (msix not enabled with napi)
@@ -3523,65 +3461,6 @@ static irqreturn_t nv_nic_irq(int foo, void *data)
 		__napi_schedule(&np->napi);
 	}
 
-#else
-	do
-	{
-		int work = 0;
-		if ((work = nv_rx_process(dev, RX_WORK_PER_LOOP))) {
-			if (unlikely(nv_alloc_rx(dev))) {
-				spin_lock(&np->lock);
-				if (!np->in_shutdown)
-					mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
-				spin_unlock(&np->lock);
-			}
-		}
-
-		spin_lock(&np->lock);
-		work += nv_tx_done(dev, TX_WORK_PER_LOOP);
-		spin_unlock(&np->lock);
-
-		if (!work)
-			break;
-
-		total_work += work;
-
-		loop_count++;
-	}
-	while (loop_count < max_interrupt_work);
-
-	if (nv_change_interrupt_mode(dev, total_work)) {
-		/* setup new irq mask */
-		writel(np->irqmask, base + NvRegIrqMask);
-	}
-
-	if (unlikely(np->events & NVREG_IRQ_LINK)) {
-		spin_lock(&np->lock);
-		nv_link_irq(dev);
-		spin_unlock(&np->lock);
-	}
-	if (unlikely(np->need_linktimer && time_after(jiffies, np->link_timeout))) {
-		spin_lock(&np->lock);
-		nv_linkchange(dev);
-		spin_unlock(&np->lock);
-		np->link_timeout = jiffies + LINK_TIMEOUT;
-	}
-	if (unlikely(np->events & NVREG_IRQ_RECOVER_ERROR)) {
-		spin_lock(&np->lock);
-		/* disable interrupts on the nic */
-		if (!(np->msi_flags & NV_MSI_X_ENABLED))
-			writel(0, base + NvRegIrqMask);
-		else
-			writel(np->irqmask, base + NvRegIrqMask);
-		pci_push(base);
-
-		if (!np->in_shutdown) {
-			np->nic_poll_irq = np->irqmask;
-			np->recover_error = 1;
-			mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
-		}
-		spin_unlock(&np->lock);
-	}
-#endif
 	dprintk(KERN_DEBUG "%s: nv_nic_irq completed\n", dev->name);
 
 	return IRQ_HANDLED;
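With the non-NAPI branch gone, the hard interrupt handler shrinks to acknowledge, mask, and schedule; rx/tx processing, link handling, and error recovery all move into the poll routine. The canonical shape of such a handler, as a sketch (my_mask_irqs is a hypothetical helper standing in for the NvRegIrqMask write):

static irqreturn_t my_irq(int irq, void *data)
{
	struct net_device *dev = data;
	struct my_priv *priv = netdev_priv(dev);

	if (napi_schedule_prep(&priv->napi)) {
		my_mask_irqs(dev);		/* silence the NIC... */
		__napi_schedule(&priv->napi);	/* ...and defer to my_poll() */
	}
	return IRQ_HANDLED;
}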
@@ -3597,10 +3476,6 @@ static irqreturn_t nv_nic_irq_optimized(int foo, void *data)
 	struct net_device *dev = (struct net_device *) data;
 	struct fe_priv *np = netdev_priv(dev);
 	u8 __iomem *base = get_hwbase(dev);
-#ifndef CONFIG_FORCEDETH_NAPI
-	int total_work = 0;
-	int loop_count = 0;
-#endif
 
 	dprintk(KERN_DEBUG "%s: nv_nic_irq_optimized\n", dev->name);
 
@@ -3617,7 +3492,6 @@ static irqreturn_t nv_nic_irq_optimized(int foo, void *data)
 
 	nv_msi_workaround(np);
 
-#ifdef CONFIG_FORCEDETH_NAPI
 	if (napi_schedule_prep(&np->napi)) {
 		/*
 		 * Disable further irq's (msix not enabled with napi)
@@ -3625,66 +3499,6 @@ static irqreturn_t nv_nic_irq_optimized(int foo, void *data)
 		writel(0, base + NvRegIrqMask);
 		__napi_schedule(&np->napi);
 	}
-#else
-	do
-	{
-		int work = 0;
-		if ((work = nv_rx_process_optimized(dev, RX_WORK_PER_LOOP))) {
-			if (unlikely(nv_alloc_rx_optimized(dev))) {
-				spin_lock(&np->lock);
-				if (!np->in_shutdown)
-					mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
-				spin_unlock(&np->lock);
-			}
-		}
-
-		spin_lock(&np->lock);
-		work += nv_tx_done_optimized(dev, TX_WORK_PER_LOOP);
-		spin_unlock(&np->lock);
-
-		if (!work)
-			break;
-
-		total_work += work;
-
-		loop_count++;
-	}
-	while (loop_count < max_interrupt_work);
-
-	if (nv_change_interrupt_mode(dev, total_work)) {
-		/* setup new irq mask */
-		writel(np->irqmask, base + NvRegIrqMask);
-	}
-
-	if (unlikely(np->events & NVREG_IRQ_LINK)) {
-		spin_lock(&np->lock);
-		nv_link_irq(dev);
-		spin_unlock(&np->lock);
-	}
-	if (unlikely(np->need_linktimer && time_after(jiffies, np->link_timeout))) {
-		spin_lock(&np->lock);
-		nv_linkchange(dev);
-		spin_unlock(&np->lock);
-		np->link_timeout = jiffies + LINK_TIMEOUT;
-	}
-	if (unlikely(np->events & NVREG_IRQ_RECOVER_ERROR)) {
-		spin_lock(&np->lock);
-		/* disable interrupts on the nic */
-		if (!(np->msi_flags & NV_MSI_X_ENABLED))
-			writel(0, base + NvRegIrqMask);
-		else
-			writel(np->irqmask, base + NvRegIrqMask);
-		pci_push(base);
-
-		if (!np->in_shutdown) {
-			np->nic_poll_irq = np->irqmask;
-			np->recover_error = 1;
-			mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
-		}
-		spin_unlock(&np->lock);
-	}
-
-#endif
 	dprintk(KERN_DEBUG "%s: nv_nic_irq_optimized completed\n", dev->name);
 
 	return IRQ_HANDLED;
@@ -3733,7 +3547,6 @@ static irqreturn_t nv_nic_irq_tx(int foo, void *data)
 	return IRQ_RETVAL(i);
 }
 
-#ifdef CONFIG_FORCEDETH_NAPI
 static int nv_napi_poll(struct napi_struct *napi, int budget)
 {
 	struct fe_priv *np = container_of(napi, struct fe_priv, napi);
@@ -3741,23 +3554,27 @@ static int nv_napi_poll(struct napi_struct *napi, int budget)
 	u8 __iomem *base = get_hwbase(dev);
 	unsigned long flags;
 	int retcode;
-	int tx_work, rx_work;
+	int rx_count, tx_work=0, rx_work=0;
 
-	if (!nv_optimized(np)) {
-		spin_lock_irqsave(&np->lock, flags);
-		tx_work = nv_tx_done(dev, np->tx_ring_size);
-		spin_unlock_irqrestore(&np->lock, flags);
+	do {
+		if (!nv_optimized(np)) {
+			spin_lock_irqsave(&np->lock, flags);
+			tx_work += nv_tx_done(dev, np->tx_ring_size);
+			spin_unlock_irqrestore(&np->lock, flags);
 
-		rx_work = nv_rx_process(dev, budget);
-		retcode = nv_alloc_rx(dev);
-	} else {
-		spin_lock_irqsave(&np->lock, flags);
-		tx_work = nv_tx_done_optimized(dev, np->tx_ring_size);
-		spin_unlock_irqrestore(&np->lock, flags);
+			rx_count = nv_rx_process(dev, budget - rx_work);
+			retcode = nv_alloc_rx(dev);
+		} else {
+			spin_lock_irqsave(&np->lock, flags);
+			tx_work += nv_tx_done_optimized(dev, np->tx_ring_size);
+			spin_unlock_irqrestore(&np->lock, flags);
 
-		rx_work = nv_rx_process_optimized(dev, budget);
-		retcode = nv_alloc_rx_optimized(dev);
-	}
+			rx_count = nv_rx_process_optimized(dev,
+							   budget - rx_work);
+			retcode = nv_alloc_rx_optimized(dev);
+		}
+	} while (retcode == 0 &&
+		 rx_count > 0 && (rx_work += rx_count) < budget);
 
 	if (retcode) {
 		spin_lock_irqsave(&np->lock, flags);
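The reworked poll body loops until the rx budget is consumed, the ring drains (rx_count == 0), or a refill allocation fails, instead of making a single pass; tx completions accumulate across iterations. A condensed sketch of the accounting, with reap_tx(), process_rx() and refill() as hypothetical stand-ins for the nv_* pairs, and the usual napi_complete() re-arm (which this hunk does not show) at the end:

static int my_poll(struct napi_struct *napi, int budget)
{
	struct my_priv *priv = container_of(napi, struct my_priv, napi);
	int rx_work = 0, rx_count, retcode;

	do {
		reap_tx(priv);				/* unbounded tx cleanup */
		rx_count = process_rx(priv, budget - rx_work);
		retcode = refill(priv);			/* nonzero = alloc failed */
	} while (retcode == 0 && rx_count > 0 &&
		 (rx_work += rx_count) < budget);

	if (rx_work < budget)
		napi_complete(napi);	/* driver then unmasks NIC irqs */
	return rx_work;
}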
@@ -3800,7 +3617,6 @@ static int nv_napi_poll(struct napi_struct *napi, int budget)
 	}
 	return rx_work;
 }
-#endif
 
 static irqreturn_t nv_nic_irq_rx(int foo, void *data)
 {
@@ -5706,6 +5522,7 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_id *id)
 		np->txrxctl_bits |= NVREG_TXRXCTL_RXCHECK;
 		dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
 		dev->features |= NETIF_F_TSO;
+		dev->features |= NETIF_F_GRO;
 	}
 
 	np->vlanctl_bits = 0;
@@ -5758,9 +5575,7 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_id *id)
 	else
 		dev->netdev_ops = &nv_netdev_ops_optimized;
 
-#ifdef CONFIG_FORCEDETH_NAPI
 	netif_napi_add(dev, &np->napi, nv_napi_poll, RX_WORK_PER_LOOP);
-#endif
 	SET_ETHTOOL_OPS(dev, &ops);
 	dev->watchdog_timeo = NV_WATCHDOG_TIMEO;
 
@@ -5863,7 +5678,7 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_id *id)
 		/* msix has had reported issues when modifying irqmask
 		   as in the case of napi, therefore, disable for now
 		*/
-#ifndef CONFIG_FORCEDETH_NAPI
+#if 0
 		np->msi_flags |= NV_MSI_X_CAPABLE;
 #endif
 	}