Diffstat (limited to 'drivers/net/forcedeth.c')
 drivers/net/forcedeth.c | 235 ++++----------------------------------------
 1 file changed, 25 insertions(+), 210 deletions(-)
diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c
index a1c0e7bb70e8..e282d0ae6a3d 100644
--- a/drivers/net/forcedeth.c
+++ b/drivers/net/forcedeth.c
@@ -1104,20 +1104,16 @@ static void nv_disable_hw_interrupts(struct net_device *dev, u32 mask)
 
 static void nv_napi_enable(struct net_device *dev)
 {
-#ifdef CONFIG_FORCEDETH_NAPI
 	struct fe_priv *np = get_nvpriv(dev);
 
 	napi_enable(&np->napi);
-#endif
 }
 
 static void nv_napi_disable(struct net_device *dev)
 {
-#ifdef CONFIG_FORCEDETH_NAPI
 	struct fe_priv *np = get_nvpriv(dev);
 
 	napi_disable(&np->napi);
-#endif
 }
 
 #define MII_READ	(-1)
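
With CONFIG_FORCEDETH_NAPI gone, nv_napi_enable()/nv_napi_disable() become plain wrappers around napi_enable()/napi_disable(). For orientation, a minimal sketch of how such wrappers are typically paired in a driver's open/close path; the example_open/example_close names and call sites are illustrative assumptions, not taken from this patch:

	/* Illustrative only: typical pairing of the NAPI helpers. */
	static int example_open(struct net_device *dev)
	{
		nv_napi_enable(dev);	/* before unmasking hw interrupts */
		/* ... start rx/tx, enable interrupts ... */
		return 0;
	}

	static int example_close(struct net_device *dev)
	{
		/* ... mask interrupts, stop rx/tx ... */
		nv_napi_disable(dev);	/* blocks until a running poll exits */
		return 0;
	}
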
@@ -1810,7 +1806,6 @@ static int nv_alloc_rx_optimized(struct net_device *dev)
 }
 
 /* If rx bufs are exhausted called after 50ms to attempt to refresh */
-#ifdef CONFIG_FORCEDETH_NAPI
 static void nv_do_rx_refill(unsigned long data)
 {
 	struct net_device *dev = (struct net_device *) data;
@@ -1819,41 +1814,6 @@ static void nv_do_rx_refill(unsigned long data)
 	/* Just reschedule NAPI rx processing */
 	napi_schedule(&np->napi);
 }
-#else
-static void nv_do_rx_refill(unsigned long data)
-{
-	struct net_device *dev = (struct net_device *) data;
-	struct fe_priv *np = netdev_priv(dev);
-	int retcode;
-
-	if (!using_multi_irqs(dev)) {
-		if (np->msi_flags & NV_MSI_X_ENABLED)
-			disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
-		else
-			disable_irq(np->pci_dev->irq);
-	} else {
-		disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
-	}
-	if (!nv_optimized(np))
-		retcode = nv_alloc_rx(dev);
-	else
-		retcode = nv_alloc_rx_optimized(dev);
-	if (retcode) {
-		spin_lock_irq(&np->lock);
-		if (!np->in_shutdown)
-			mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
-		spin_unlock_irq(&np->lock);
-	}
-	if (!using_multi_irqs(dev)) {
-		if (np->msi_flags & NV_MSI_X_ENABLED)
-			enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
-		else
-			enable_irq(np->pci_dev->irq);
-	} else {
-		enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
-	}
-}
-#endif
 
 static void nv_init_rx(struct net_device *dev)
 {
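
The timer callback is now NAPI-only: instead of refilling the ring under disable_irq()/enable_irq(), it merely reschedules the poll routine and lets nv_napi_poll() do the allocation. A sketch of how nv_do_rx_refill is presumably wired up to the oom_kick timer; the init site sits outside this diff, so treat the setup_timer() call as an assumption:

	/* Assumed initialization, elsewhere in the driver (not in this patch): */
	setup_timer(&np->oom_kick, nv_do_rx_refill, (unsigned long)dev);

	/* Re-armed on allocation failure, as seen in the poll path below: */
	if (!np->in_shutdown)
		mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
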
@@ -2816,11 +2776,7 @@ static int nv_rx_process(struct net_device *dev, int limit)
 		skb->protocol = eth_type_trans(skb, dev);
 		dprintk(KERN_DEBUG "%s: nv_rx_process: %d bytes, proto %d accepted.\n",
 					dev->name, len, skb->protocol);
-#ifdef CONFIG_FORCEDETH_NAPI
-		netif_receive_skb(skb);
-#else
-		netif_rx(skb);
-#endif
+		napi_gro_receive(&np->napi, skb);
 		dev->stats.rx_packets++;
 		dev->stats.rx_bytes += len;
next_pkt:
@@ -2909,27 +2865,14 @@ static int nv_rx_process_optimized(struct net_device *dev, int limit)
 					dev->name, len, skb->protocol);
 
 			if (likely(!np->vlangrp)) {
-#ifdef CONFIG_FORCEDETH_NAPI
-				netif_receive_skb(skb);
-#else
-				netif_rx(skb);
-#endif
+				napi_gro_receive(&np->napi, skb);
 			} else {
 				vlanflags = le32_to_cpu(np->get_rx.ex->buflow);
 				if (vlanflags & NV_RX3_VLAN_TAG_PRESENT) {
-#ifdef CONFIG_FORCEDETH_NAPI
-					vlan_hwaccel_receive_skb(skb, np->vlangrp,
-								 vlanflags & NV_RX3_VLAN_TAG_MASK);
-#else
-					vlan_hwaccel_rx(skb, np->vlangrp,
-							vlanflags & NV_RX3_VLAN_TAG_MASK);
-#endif
+					vlan_gro_receive(&np->napi, np->vlangrp,
+							 vlanflags & NV_RX3_VLAN_TAG_MASK, skb);
 				} else {
-#ifdef CONFIG_FORCEDETH_NAPI
-					netif_receive_skb(skb);
-#else
-					netif_rx(skb);
-#endif
+					napi_gro_receive(&np->napi, skb);
 				}
 			}
 
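
All three delivery points now feed received skbs into GRO. napi_gro_receive() and its VLAN-aware sibling vlan_gro_receive() must be called from NAPI poll context, which the rest of this patch guarantees by making NAPI unconditional. Stripped of the old #ifdef noise, the resulting decision tree is simply:

	/* Condensed from the new rx path above (same identifiers). */
	if (likely(!np->vlangrp)) {
		napi_gro_receive(&np->napi, skb);
	} else {
		vlanflags = le32_to_cpu(np->get_rx.ex->buflow);
		if (vlanflags & NV_RX3_VLAN_TAG_PRESENT)
			vlan_gro_receive(&np->napi, np->vlangrp,
					 vlanflags & NV_RX3_VLAN_TAG_MASK, skb);
		else
			napi_gro_receive(&np->napi, skb);
	}
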
@@ -3496,10 +3439,6 @@ static irqreturn_t nv_nic_irq(int foo, void *data)
 	struct net_device *dev = (struct net_device *) data;
 	struct fe_priv *np = netdev_priv(dev);
 	u8 __iomem *base = get_hwbase(dev);
-#ifndef CONFIG_FORCEDETH_NAPI
-	int total_work = 0;
-	int loop_count = 0;
-#endif
 
 	dprintk(KERN_DEBUG "%s: nv_nic_irq\n", dev->name);
 
@@ -3516,7 +3455,6 @@ static irqreturn_t nv_nic_irq(int foo, void *data)
 
 	nv_msi_workaround(np);
 
-#ifdef CONFIG_FORCEDETH_NAPI
 	if (napi_schedule_prep(&np->napi)) {
 		/*
 		 * Disable further irq's (msix not enabled with napi)
@@ -3525,65 +3463,6 @@ static irqreturn_t nv_nic_irq(int foo, void *data)
 		__napi_schedule(&np->napi);
 	}
 
-#else
-	do
-	{
-		int work = 0;
-		if ((work = nv_rx_process(dev, RX_WORK_PER_LOOP))) {
-			if (unlikely(nv_alloc_rx(dev))) {
-				spin_lock(&np->lock);
-				if (!np->in_shutdown)
-					mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
-				spin_unlock(&np->lock);
-			}
-		}
-
-		spin_lock(&np->lock);
-		work += nv_tx_done(dev, TX_WORK_PER_LOOP);
-		spin_unlock(&np->lock);
-
-		if (!work)
-			break;
-
-		total_work += work;
-
-		loop_count++;
-	}
-	while (loop_count < max_interrupt_work);
-
-	if (nv_change_interrupt_mode(dev, total_work)) {
-		/* setup new irq mask */
-		writel(np->irqmask, base + NvRegIrqMask);
-	}
-
-	if (unlikely(np->events & NVREG_IRQ_LINK)) {
-		spin_lock(&np->lock);
-		nv_link_irq(dev);
-		spin_unlock(&np->lock);
-	}
-	if (unlikely(np->need_linktimer && time_after(jiffies, np->link_timeout))) {
-		spin_lock(&np->lock);
-		nv_linkchange(dev);
-		spin_unlock(&np->lock);
-		np->link_timeout = jiffies + LINK_TIMEOUT;
-	}
-	if (unlikely(np->events & NVREG_IRQ_RECOVER_ERROR)) {
-		spin_lock(&np->lock);
-		/* disable interrupts on the nic */
-		if (!(np->msi_flags & NV_MSI_X_ENABLED))
-			writel(0, base + NvRegIrqMask);
-		else
-			writel(np->irqmask, base + NvRegIrqMask);
-		pci_push(base);
-
-		if (!np->in_shutdown) {
-			np->nic_poll_irq = np->irqmask;
-			np->recover_error = 1;
-			mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
-		}
-		spin_unlock(&np->lock);
-	}
-#endif
 	dprintk(KERN_DEBUG "%s: nv_nic_irq completed\n", dev->name);
 
 	return IRQ_HANDLED;
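
With the #else branch deleted, the hard interrupt handler reduces to the canonical NAPI pattern: mask the device's interrupts and defer all rx/tx work to the poll routine. A minimal sketch of that pattern using the identifiers from the surrounding code (simplified; the real handler also reads and acks np->events and runs nv_msi_workaround()):

	static irqreturn_t example_nic_irq(int irq, void *data)
	{
		struct net_device *dev = data;
		struct fe_priv *np = netdev_priv(dev);
		u8 __iomem *base = get_hwbase(dev);

		if (napi_schedule_prep(&np->napi)) {
			/* Mask further interrupts; nv_napi_poll() restores
			 * np->irqmask once it has drained within budget. */
			writel(0, base + NvRegIrqMask);
			__napi_schedule(&np->napi);
		}
		return IRQ_HANDLED;
	}
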
@@ -3599,10 +3478,6 @@ static irqreturn_t nv_nic_irq_optimized(int foo, void *data)
 	struct net_device *dev = (struct net_device *) data;
 	struct fe_priv *np = netdev_priv(dev);
 	u8 __iomem *base = get_hwbase(dev);
-#ifndef CONFIG_FORCEDETH_NAPI
-	int total_work = 0;
-	int loop_count = 0;
-#endif
 
 	dprintk(KERN_DEBUG "%s: nv_nic_irq_optimized\n", dev->name);
 
@@ -3619,7 +3494,6 @@ static irqreturn_t nv_nic_irq_optimized(int foo, void *data)
 
 	nv_msi_workaround(np);
 
-#ifdef CONFIG_FORCEDETH_NAPI
 	if (napi_schedule_prep(&np->napi)) {
 		/*
 		 * Disable further irq's (msix not enabled with napi)
@@ -3627,66 +3501,6 @@ static irqreturn_t nv_nic_irq_optimized(int foo, void *data)
 		writel(0, base + NvRegIrqMask);
 		__napi_schedule(&np->napi);
 	}
-#else
-	do
-	{
-		int work = 0;
-		if ((work = nv_rx_process_optimized(dev, RX_WORK_PER_LOOP))) {
-			if (unlikely(nv_alloc_rx_optimized(dev))) {
-				spin_lock(&np->lock);
-				if (!np->in_shutdown)
-					mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
-				spin_unlock(&np->lock);
-			}
-		}
-
-		spin_lock(&np->lock);
-		work += nv_tx_done_optimized(dev, TX_WORK_PER_LOOP);
-		spin_unlock(&np->lock);
-
-		if (!work)
-			break;
-
-		total_work += work;
-
-		loop_count++;
-	}
-	while (loop_count < max_interrupt_work);
-
-	if (nv_change_interrupt_mode(dev, total_work)) {
-		/* setup new irq mask */
-		writel(np->irqmask, base + NvRegIrqMask);
-	}
-
-	if (unlikely(np->events & NVREG_IRQ_LINK)) {
-		spin_lock(&np->lock);
-		nv_link_irq(dev);
-		spin_unlock(&np->lock);
-	}
-	if (unlikely(np->need_linktimer && time_after(jiffies, np->link_timeout))) {
-		spin_lock(&np->lock);
-		nv_linkchange(dev);
-		spin_unlock(&np->lock);
-		np->link_timeout = jiffies + LINK_TIMEOUT;
-	}
-	if (unlikely(np->events & NVREG_IRQ_RECOVER_ERROR)) {
-		spin_lock(&np->lock);
-		/* disable interrupts on the nic */
-		if (!(np->msi_flags & NV_MSI_X_ENABLED))
-			writel(0, base + NvRegIrqMask);
-		else
-			writel(np->irqmask, base + NvRegIrqMask);
-		pci_push(base);
-
-		if (!np->in_shutdown) {
-			np->nic_poll_irq = np->irqmask;
-			np->recover_error = 1;
-			mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
-		}
-		spin_unlock(&np->lock);
-	}
-
-#endif
 	dprintk(KERN_DEBUG "%s: nv_nic_irq_optimized completed\n", dev->name);
 
 	return IRQ_HANDLED;
@@ -3735,7 +3549,6 @@ static irqreturn_t nv_nic_irq_tx(int foo, void *data)
 	return IRQ_RETVAL(i);
 }
 
-#ifdef CONFIG_FORCEDETH_NAPI
 static int nv_napi_poll(struct napi_struct *napi, int budget)
 {
 	struct fe_priv *np = container_of(napi, struct fe_priv, napi);
@@ -3743,23 +3556,27 @@ static int nv_napi_poll(struct napi_struct *napi, int budget)
 	u8 __iomem *base = get_hwbase(dev);
 	unsigned long flags;
 	int retcode;
-	int tx_work, rx_work;
+	int rx_count, tx_work=0, rx_work=0;
 
-	if (!nv_optimized(np)) {
-		spin_lock_irqsave(&np->lock, flags);
-		tx_work = nv_tx_done(dev, np->tx_ring_size);
-		spin_unlock_irqrestore(&np->lock, flags);
+	do {
+		if (!nv_optimized(np)) {
+			spin_lock_irqsave(&np->lock, flags);
+			tx_work += nv_tx_done(dev, np->tx_ring_size);
+			spin_unlock_irqrestore(&np->lock, flags);
 
-		rx_work = nv_rx_process(dev, budget);
-		retcode = nv_alloc_rx(dev);
-	} else {
-		spin_lock_irqsave(&np->lock, flags);
-		tx_work = nv_tx_done_optimized(dev, np->tx_ring_size);
-		spin_unlock_irqrestore(&np->lock, flags);
+			rx_count = nv_rx_process(dev, budget - rx_work);
+			retcode = nv_alloc_rx(dev);
+		} else {
+			spin_lock_irqsave(&np->lock, flags);
+			tx_work += nv_tx_done_optimized(dev, np->tx_ring_size);
+			spin_unlock_irqrestore(&np->lock, flags);
 
-		rx_work = nv_rx_process_optimized(dev, budget);
-		retcode = nv_alloc_rx_optimized(dev);
-	}
+			rx_count = nv_rx_process_optimized(dev,
+						budget - rx_work);
+			retcode = nv_alloc_rx_optimized(dev);
+		}
+	} while (retcode == 0 &&
+		 rx_count > 0 && (rx_work += rx_count) < budget);
 
 	if (retcode) {
 		spin_lock_irqsave(&np->lock, flags);
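
This is the behavioral core of the patch: nv_napi_poll() now loops, reaping tx completions and rx packets until the ring is empty (rx_count == 0), an rx allocation fails (retcode != 0), or the accumulated rx work reaches the NAPI budget. Passing budget - rx_work down keeps a single poll within its quota. A simplified sketch of the contract this relies on, with process_rx() as a hypothetical stand-in for the two nv_rx_process variants and the tail (outside this hunk) assumed to complete NAPI as usual:

	/* Sketch only; process_rx() is a hypothetical stand-in. */
	static int process_rx(struct napi_struct *napi, int limit);

	static int example_poll(struct napi_struct *napi, int budget)
	{
		int rx_work = 0, rx_count;

		do {
			rx_count = process_rx(napi, budget - rx_work);
			rx_work += rx_count;
		} while (rx_count > 0 && rx_work < budget);

		if (rx_work < budget) {
			napi_complete(napi);	/* done: re-enable device interrupts */
			/* writel(np->irqmask, base + NvRegIrqMask); */
		}
		return rx_work;			/* must not exceed budget */
	}
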
@@ -3802,7 +3619,6 @@ static int nv_napi_poll(struct napi_struct *napi, int budget)
 	}
 	return rx_work;
 }
-#endif
 
 static irqreturn_t nv_nic_irq_rx(int foo, void *data)
 {
@@ -5708,6 +5524,7 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
 		np->txrxctl_bits |= NVREG_TXRXCTL_RXCHECK;
 		dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
 		dev->features |= NETIF_F_TSO;
+		dev->features |= NETIF_F_GRO;
 	}
 
 	np->vlanctl_bits = 0;
@@ -5760,9 +5577,7 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
 	else
 		dev->netdev_ops = &nv_netdev_ops_optimized;
 
-#ifdef CONFIG_FORCEDETH_NAPI
 	netif_napi_add(dev, &np->napi, nv_napi_poll, RX_WORK_PER_LOOP);
-#endif
 	SET_ETHTOOL_OPS(dev, &ops);
 	dev->watchdog_timeo = NV_WATCHDOG_TIMEO;
 
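
netif_napi_add() now runs unconditionally at probe time, matching the NETIF_F_GRO feature bit added above. RX_WORK_PER_LOOP doubles as the NAPI weight, i.e. the default budget handed to nv_napi_poll() on each invocation, which ties the interrupt-handler sketch earlier to the poll loop.
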
@@ -5865,7 +5680,7 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
 		/* msix has had reported issues when modifying irqmask
 		   as in the case of napi, therefore, disable for now
 		*/
-#ifndef CONFIG_FORCEDETH_NAPI
+#if 0
 		np->msi_flags |= NV_MSI_X_CAPABLE;
 #endif
 	}
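
Previously NV_MSI_X_CAPABLE was compiled in only for non-NAPI builds; with NAPI now unconditional, the equivalent is to leave it permanently disabled, hence #ifndef CONFIG_FORCEDETH_NAPI becomes #if 0. Per the comment above the hunk, MSI-X had reported problems when the irq mask is modified under NAPI, so the flag stays off until that is resolved.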