author     David S. Miller <davem@davemloft.net>    2010-05-04 02:33:05 -0400
committer  David S. Miller <davem@davemloft.net>    2010-05-04 02:33:05 -0400
commit     0a12761bcd5646691c5d16dd93df84d1b8849285 (patch)
tree       9e23f12299b0d85d425b81250089ebcd9431f865
parent     53f224cc5f8f650f8e8d86abbe990c93f12834c8 (diff)
forcedeth: Kill NAPI config options.
All distributions enable it, so no significant body of users is even
testing the driver with it disabled. And making NAPI configurable is
heavily discouraged anyway.

I left the MSI-X interrupt enabling in an "#if 0" block so that
hopefully someone can debug it and it can get re-enabled. Probably it
was just one of the NVIDIA chipset MSI errata that we now handle in
the PCI quirks (see drivers/pci/quirks.c and stuff like
nvenet_msi_disable()).
Signed-off-by: David S. Miller <davem@davemloft.net>
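[Editor's note: the quirk mechanism referenced above registers a fixup that
runs before driver probe and simply forbids MSI on known-bad devices, so no
driver-side workaround is needed. A minimal sketch in that spirit follows;
the board-name match and device ID below are illustrative assumptions, not
the exact upstream entry -- see the real nvenet_msi_disable() in
drivers/pci/quirks.c.]

    #include <linux/pci.h>
    #include <linux/dmi.h>
    #include <linux/string.h>

    /* Hypothetical quirk: disable MSI on an NVIDIA onboard NIC whose
     * chipset is known to deliver broken MSI interrupts.  Setting
     * dev->no_msi makes pci_enable_msi()/pci_enable_msix() fail, so
     * the driver silently falls back to legacy INTx. */
    static void nvenet_msi_disable_sketch(struct pci_dev *dev)
    {
            const char *board = dmi_get_system_info(DMI_BOARD_NAME);

            if (board && strstr(board, "P5N32-SLI")) {  /* assumed board */
                    dev_info(&dev->dev,
                             "Disabling MSI for known nvenet errata\n");
                    dev->no_msi = 1;
            }
    }
    DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA,
                            PCI_DEVICE_ID_NVIDIA_NVENET_15,
                            nvenet_msi_disable_sketch);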
-rw-r--r--  drivers/net/Kconfig      |  14 -
-rw-r--r--  drivers/net/forcedeth.c  | 194 -
2 files changed, 1 insertion(+), 207 deletions(-)
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index dbd26f992158..b9e7618a1473 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -1453,20 +1453,6 @@ config FORCEDETH
           To compile this driver as a module, choose M here. The module
           will be called forcedeth.
 
-config FORCEDETH_NAPI
-        bool "Use Rx Polling (NAPI) (EXPERIMENTAL)"
-        depends on FORCEDETH && EXPERIMENTAL
-        help
-          NAPI is a new driver API designed to reduce CPU and interrupt load
-          when the driver is receiving lots of packets from the card. It is
-          still somewhat experimental and thus not yet enabled by default.
-
-          If your estimated Rx load is 10kpps or more, or if the card will be
-          deployed on potentially unfriendly networks (e.g. in a firewall),
-          then say Y here.
-
-          If in doubt, say N.
-
 config CS89x0
         tristate "CS89x0 support"
         depends on NET_ETHERNET && (ISA || EISA || MACH_IXDP2351 \
diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c
index 4a24cc7d9555..f9e1dd48ac56 100644
--- a/drivers/net/forcedeth.c
+++ b/drivers/net/forcedeth.c
@@ -1104,20 +1104,16 @@ static void nv_disable_hw_interrupts(struct net_device *dev, u32 mask)
 
 static void nv_napi_enable(struct net_device *dev)
 {
-#ifdef CONFIG_FORCEDETH_NAPI
        struct fe_priv *np = get_nvpriv(dev);
 
        napi_enable(&np->napi);
-#endif
 }
 
 static void nv_napi_disable(struct net_device *dev)
 {
-#ifdef CONFIG_FORCEDETH_NAPI
        struct fe_priv *np = get_nvpriv(dev);
 
        napi_disable(&np->napi);
-#endif
 }
 
 #define MII_READ        (-1)
@@ -1810,7 +1806,6 @@ static int nv_alloc_rx_optimized(struct net_device *dev)
 }
 
 /* If rx bufs are exhausted called after 50ms to attempt to refresh */
-#ifdef CONFIG_FORCEDETH_NAPI
 static void nv_do_rx_refill(unsigned long data)
 {
        struct net_device *dev = (struct net_device *) data;
@@ -1819,41 +1814,6 @@ static void nv_do_rx_refill(unsigned long data)
        /* Just reschedule NAPI rx processing */
        napi_schedule(&np->napi);
 }
-#else
-static void nv_do_rx_refill(unsigned long data)
-{
-       struct net_device *dev = (struct net_device *) data;
-       struct fe_priv *np = netdev_priv(dev);
-       int retcode;
-
-       if (!using_multi_irqs(dev)) {
-               if (np->msi_flags & NV_MSI_X_ENABLED)
-                       disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
-               else
-                       disable_irq(np->pci_dev->irq);
-       } else {
-               disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
-       }
-       if (!nv_optimized(np))
-               retcode = nv_alloc_rx(dev);
-       else
-               retcode = nv_alloc_rx_optimized(dev);
-       if (retcode) {
-               spin_lock_irq(&np->lock);
-               if (!np->in_shutdown)
-                       mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
-               spin_unlock_irq(&np->lock);
-       }
-       if (!using_multi_irqs(dev)) {
-               if (np->msi_flags & NV_MSI_X_ENABLED)
-                       enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
-               else
-                       enable_irq(np->pci_dev->irq);
-       } else {
-               enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
-       }
-}
-#endif
 
 static void nv_init_rx(struct net_device *dev)
 {
@@ -2816,11 +2776,7 @@ static int nv_rx_process(struct net_device *dev, int limit)
                skb->protocol = eth_type_trans(skb, dev);
                dprintk(KERN_DEBUG "%s: nv_rx_process: %d bytes, proto %d accepted.\n",
                                        dev->name, len, skb->protocol);
-#ifdef CONFIG_FORCEDETH_NAPI
                napi_gro_receive(&np->napi, skb);
-#else
-               netif_rx(skb);
-#endif
                dev->stats.rx_packets++;
                dev->stats.rx_bytes += len;
 next_pkt:
@@ -2909,27 +2865,14 @@ static int nv_rx_process_optimized(struct net_device *dev, int limit)
                                dev->name, len, skb->protocol);
 
                        if (likely(!np->vlangrp)) {
-#ifdef CONFIG_FORCEDETH_NAPI
                                napi_gro_receive(&np->napi, skb);
-#else
-                               netif_rx(skb);
-#endif
                        } else {
                                vlanflags = le32_to_cpu(np->get_rx.ex->buflow);
                                if (vlanflags & NV_RX3_VLAN_TAG_PRESENT) {
-#ifdef CONFIG_FORCEDETH_NAPI
                                        vlan_gro_receive(&np->napi, np->vlangrp,
                                                         vlanflags & NV_RX3_VLAN_TAG_MASK, skb);
-#else
-                                       vlan_hwaccel_rx(skb, np->vlangrp,
-                                                       vlanflags & NV_RX3_VLAN_TAG_MASK);
-#endif
                                } else {
-#ifdef CONFIG_FORCEDETH_NAPI
                                        napi_gro_receive(&np->napi, skb);
-#else
-                                       netif_rx(skb);
-#endif
                                }
                        }
 
@@ -3496,10 +3439,6 @@ static irqreturn_t nv_nic_irq(int foo, void *data)
        struct net_device *dev = (struct net_device *) data;
        struct fe_priv *np = netdev_priv(dev);
        u8 __iomem *base = get_hwbase(dev);
-#ifndef CONFIG_FORCEDETH_NAPI
-       int total_work = 0;
-       int loop_count = 0;
-#endif
 
        dprintk(KERN_DEBUG "%s: nv_nic_irq\n", dev->name);
 
@@ -3516,7 +3455,6 @@ static irqreturn_t nv_nic_irq(int foo, void *data)
 
        nv_msi_workaround(np);
 
-#ifdef CONFIG_FORCEDETH_NAPI
        if (napi_schedule_prep(&np->napi)) {
                /*
                 * Disable further irq's (msix not enabled with napi)
@@ -3525,65 +3463,6 @@ static irqreturn_t nv_nic_irq(int foo, void *data)
                __napi_schedule(&np->napi);
        }
 
-#else
-       do
-       {
-               int work = 0;
-               if ((work = nv_rx_process(dev, RX_WORK_PER_LOOP))) {
-                       if (unlikely(nv_alloc_rx(dev))) {
-                               spin_lock(&np->lock);
-                               if (!np->in_shutdown)
-                                       mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
-                               spin_unlock(&np->lock);
-                       }
-               }
-
-               spin_lock(&np->lock);
-               work += nv_tx_done(dev, TX_WORK_PER_LOOP);
-               spin_unlock(&np->lock);
-
-               if (!work)
-                       break;
-
-               total_work += work;
-
-               loop_count++;
-       }
-       while (loop_count < max_interrupt_work);
-
-       if (nv_change_interrupt_mode(dev, total_work)) {
-               /* setup new irq mask */
-               writel(np->irqmask, base + NvRegIrqMask);
-       }
-
-       if (unlikely(np->events & NVREG_IRQ_LINK)) {
-               spin_lock(&np->lock);
-               nv_link_irq(dev);
-               spin_unlock(&np->lock);
-       }
-       if (unlikely(np->need_linktimer && time_after(jiffies, np->link_timeout))) {
-               spin_lock(&np->lock);
-               nv_linkchange(dev);
-               spin_unlock(&np->lock);
-               np->link_timeout = jiffies + LINK_TIMEOUT;
-       }
-       if (unlikely(np->events & NVREG_IRQ_RECOVER_ERROR)) {
-               spin_lock(&np->lock);
-               /* disable interrupts on the nic */
-               if (!(np->msi_flags & NV_MSI_X_ENABLED))
-                       writel(0, base + NvRegIrqMask);
-               else
-                       writel(np->irqmask, base + NvRegIrqMask);
-               pci_push(base);
-
-               if (!np->in_shutdown) {
-                       np->nic_poll_irq = np->irqmask;
-                       np->recover_error = 1;
-                       mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
-               }
-               spin_unlock(&np->lock);
-       }
-#endif
        dprintk(KERN_DEBUG "%s: nv_nic_irq completed\n", dev->name);
 
        return IRQ_HANDLED;
@@ -3599,10 +3478,6 @@ static irqreturn_t nv_nic_irq_optimized(int foo, void *data)
        struct net_device *dev = (struct net_device *) data;
        struct fe_priv *np = netdev_priv(dev);
        u8 __iomem *base = get_hwbase(dev);
-#ifndef CONFIG_FORCEDETH_NAPI
-       int total_work = 0;
-       int loop_count = 0;
-#endif
 
        dprintk(KERN_DEBUG "%s: nv_nic_irq_optimized\n", dev->name);
 
@@ -3619,7 +3494,6 @@ static irqreturn_t nv_nic_irq_optimized(int foo, void *data)
 
        nv_msi_workaround(np);
 
-#ifdef CONFIG_FORCEDETH_NAPI
        if (napi_schedule_prep(&np->napi)) {
                /*
                 * Disable further irq's (msix not enabled with napi)
@@ -3627,66 +3501,6 @@ static irqreturn_t nv_nic_irq_optimized(int foo, void *data)
                writel(0, base + NvRegIrqMask);
                __napi_schedule(&np->napi);
        }
-#else
-       do
-       {
-               int work = 0;
-               if ((work = nv_rx_process_optimized(dev, RX_WORK_PER_LOOP))) {
-                       if (unlikely(nv_alloc_rx_optimized(dev))) {
-                               spin_lock(&np->lock);
-                               if (!np->in_shutdown)
-                                       mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
-                               spin_unlock(&np->lock);
-                       }
-               }
-
-               spin_lock(&np->lock);
-               work += nv_tx_done_optimized(dev, TX_WORK_PER_LOOP);
-               spin_unlock(&np->lock);
-
-               if (!work)
-                       break;
-
-               total_work += work;
-
-               loop_count++;
-       }
-       while (loop_count < max_interrupt_work);
-
-       if (nv_change_interrupt_mode(dev, total_work)) {
-               /* setup new irq mask */
-               writel(np->irqmask, base + NvRegIrqMask);
-       }
-
-       if (unlikely(np->events & NVREG_IRQ_LINK)) {
-               spin_lock(&np->lock);
-               nv_link_irq(dev);
-               spin_unlock(&np->lock);
-       }
-       if (unlikely(np->need_linktimer && time_after(jiffies, np->link_timeout))) {
-               spin_lock(&np->lock);
-               nv_linkchange(dev);
-               spin_unlock(&np->lock);
-               np->link_timeout = jiffies + LINK_TIMEOUT;
-       }
-       if (unlikely(np->events & NVREG_IRQ_RECOVER_ERROR)) {
-               spin_lock(&np->lock);
-               /* disable interrupts on the nic */
-               if (!(np->msi_flags & NV_MSI_X_ENABLED))
-                       writel(0, base + NvRegIrqMask);
-               else
-                       writel(np->irqmask, base + NvRegIrqMask);
-               pci_push(base);
-
-               if (!np->in_shutdown) {
-                       np->nic_poll_irq = np->irqmask;
-                       np->recover_error = 1;
-                       mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
-               }
-               spin_unlock(&np->lock);
-       }
-
-#endif
        dprintk(KERN_DEBUG "%s: nv_nic_irq_optimized completed\n", dev->name);
 
        return IRQ_HANDLED;
@@ -3735,7 +3549,6 @@ static irqreturn_t nv_nic_irq_tx(int foo, void *data)
        return IRQ_RETVAL(i);
 }
 
-#ifdef CONFIG_FORCEDETH_NAPI
 static int nv_napi_poll(struct napi_struct *napi, int budget)
 {
        struct fe_priv *np = container_of(napi, struct fe_priv, napi);
@@ -3805,7 +3618,6 @@ static int nv_napi_poll(struct napi_struct *napi, int budget)
        }
        return rx_work;
 }
-#endif
 
 static irqreturn_t nv_nic_irq_rx(int foo, void *data)
 {
@@ -5711,9 +5523,7 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
                np->txrxctl_bits |= NVREG_TXRXCTL_RXCHECK;
                dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
                dev->features |= NETIF_F_TSO;
-#ifdef CONFIG_FORCEDETH_NAPI
                dev->features |= NETIF_F_GRO;
-#endif
        }
 
        np->vlanctl_bits = 0;
@@ -5766,9 +5576,7 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
        else
                dev->netdev_ops = &nv_netdev_ops_optimized;
 
-#ifdef CONFIG_FORCEDETH_NAPI
        netif_napi_add(dev, &np->napi, nv_napi_poll, RX_WORK_PER_LOOP);
-#endif
        SET_ETHTOOL_OPS(dev, &ops);
        dev->watchdog_timeo = NV_WATCHDOG_TIMEO;
 
@@ -5871,7 +5679,7 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
                /* msix has had reported issues when modifying irqmask
                   as in the case of napi, therefore, disable for now
                */
-#ifndef CONFIG_FORCEDETH_NAPI
+#if 0
                np->msi_flags |= NV_MSI_X_CAPABLE;
 #endif
        }
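[Editor's note: with the config option gone, the driver is left with the
canonical NAPI flow visible in the hunks above -- the hard interrupt handler
only masks the device and schedules polling, and the poll callback does
budgeted RX work before unmasking. A condensed sketch of that contract
follows; the names (my_priv, my_irq, MY_IRQ_MASK, my_rx_process) are
illustrative stand-ins, not forcedeth's own identifiers.]

    #include <linux/netdevice.h>
    #include <linux/interrupt.h>
    #include <linux/io.h>

    #define MY_IRQ_MASK     0x00    /* illustrative register offset */
    #define MY_RX_BUDGET    64      /* analogous to RX_WORK_PER_LOOP */

    struct my_priv {                /* illustrative stand-in for fe_priv */
            struct napi_struct napi;
            u8 __iomem *base;
            u32 irqmask;
    };

    /* Stub for illustration: pretend up to 'limit' packets were
     * pulled off the RX ring and passed up the stack. */
    static int my_rx_process(struct net_device *dev, int limit)
    {
            (void)dev;
            return limit / 2;
    }

    /* Hard IRQ: mask the device and defer all work to the poller,
     * mirroring the surviving path in nv_nic_irq(). */
    static irqreturn_t my_irq(int irq, void *data)
    {
            struct net_device *dev = data;
            struct my_priv *np = netdev_priv(dev);

            if (napi_schedule_prep(&np->napi)) {
                    writel(0, np->base + MY_IRQ_MASK); /* disable irqs */
                    __napi_schedule(&np->napi);        /* poll in softirq */
            }
            return IRQ_HANDLED;
    }

    /* Softirq poll: bounded RX work; unmask only when fully drained,
     * as nv_napi_poll() does. */
    static int my_poll(struct napi_struct *napi, int budget)
    {
            struct my_priv *np = container_of(napi, struct my_priv, napi);
            int work = my_rx_process(napi->dev, budget);

            if (work < budget) {
                    napi_complete(napi);
                    writel(np->irqmask, np->base + MY_IRQ_MASK); /* unmask */
            }
            return work;
    }

    /* Registered once at probe time, as in the nv_probe() hunk above:
     *      netif_napi_add(dev, &np->napi, my_poll, MY_RX_BUDGET);
     */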