path: root/drivers/net/forcedeth.c
author	Ayaz Abdulla <aabdulla@nvidia.com>	2007-01-21 18:10:57 -0500
committer	Jeff Garzik <jeff@garzik.org>	2007-02-05 16:58:48 -0500
commit	f0734ab658390380079369f7090dcf7aa226f394 (patch)
tree	83a6a4a9146ca9342e0698c3abba1d1df91858ce /drivers/net/forcedeth.c
parent	b01867cbd1853995946c8c838eff93a0885d8bc6 (diff)
forcedeth: irq data path optimization
This patch optimizes the irq data paths and cleans up the code.

Signed-off-by: Ayaz Abdulla <aabdulla@nvidia.com>
Signed-off-by: Jeff Garzik <jeff@garzik.org>
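Editor's note: the recurring change in the hunks below is a hot/cold split of the interrupt handlers. The per-interrupt RX/TX work is hoisted to the front of the loop, the pci_push() read-back (which forcedeth uses to flush posted MMIO writes) is dropped from every loop iteration, and rare events (link changes, TX errors, unknown interrupts, the interrupt-storm guard) are wrapped in unlikely() so the compiler keeps the common case on the straight-line path. A minimal, self-contained sketch of that branch-hint pattern follows; the event bits, helper bodies, and printouts are invented for illustration, and only the structure mirrors the driver:

/* sketch.c - the unlikely() hot/cold-path pattern the patch applies
 * throughout the forcedeth interrupt handlers. Build: cc sketch.c */
#include <stdio.h>

#define unlikely(x) __builtin_expect(!!(x), 0)  /* as in the kernel */

#define IRQ_RX      0x01  /* hypothetical event bits for the example */
#define IRQ_TX      0x02
#define IRQ_LINK    0x40
#define IRQ_TX_ERR  0x80

static void irq_handler(unsigned int events)
{
	/* Hot path first, no hints: RX/TX work happens on almost
	 * every interrupt, so it stays on the fall-through path. */
	if (events & (IRQ_RX | IRQ_TX))
		printf("process rx/tx work\n");

	/* Cold path: rare conditions get unlikely(), telling the
	 * compiler to lay these blocks out off the common path. */
	if (unlikely(events & IRQ_LINK))
		printf("handle link change\n");
	if (unlikely(events & IRQ_TX_ERR))
		printf("log tx error: 0x%x\n", events);
}

int main(void)
{
	irq_handler(IRQ_RX);            /* common case */
	irq_handler(IRQ_RX | IRQ_LINK); /* rare case */
	return 0;
}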
Diffstat (limited to 'drivers/net/forcedeth.c')
-rw-r--r--	drivers/net/forcedeth.c	149
1 file changed, 77 insertions, 72 deletions
diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c
index fd91071bbc82..7c0f0ccbbb29 100644
--- a/drivers/net/forcedeth.c
+++ b/drivers/net/forcedeth.c
@@ -2777,7 +2777,6 @@ static irqreturn_t nv_nic_irq(int foo, void *data)
 			events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQSTAT_MASK;
 			writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus);
 		}
-		pci_push(base);
 		dprintk(KERN_DEBUG "%s: irq: %08x\n", dev->name, events);
 		if (!(events & np->irqmask))
 			break;
@@ -2786,22 +2785,46 @@ static irqreturn_t nv_nic_irq(int foo, void *data)
 		nv_tx_done(dev);
 		spin_unlock(&np->lock);
 
-		if (events & NVREG_IRQ_LINK) {
+#ifdef CONFIG_FORCEDETH_NAPI
+		if (events & NVREG_IRQ_RX_ALL) {
+			netif_rx_schedule(dev);
+
+			/* Disable furthur receive irq's */
+			spin_lock(&np->lock);
+			np->irqmask &= ~NVREG_IRQ_RX_ALL;
+
+			if (np->msi_flags & NV_MSI_X_ENABLED)
+				writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask);
+			else
+				writel(np->irqmask, base + NvRegIrqMask);
+			spin_unlock(&np->lock);
+		}
+#else
+		if (nv_rx_process(dev, dev->weight)) {
+			if (unlikely(nv_alloc_rx(dev))) {
+				spin_lock(&np->lock);
+				if (!np->in_shutdown)
+					mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
+				spin_unlock(&np->lock);
+			}
+		}
+#endif
+		if (unlikely(events & NVREG_IRQ_LINK)) {
 			spin_lock(&np->lock);
 			nv_link_irq(dev);
 			spin_unlock(&np->lock);
 		}
-		if (np->need_linktimer && time_after(jiffies, np->link_timeout)) {
+		if (unlikely(np->need_linktimer && time_after(jiffies, np->link_timeout))) {
 			spin_lock(&np->lock);
 			nv_linkchange(dev);
 			spin_unlock(&np->lock);
 			np->link_timeout = jiffies + LINK_TIMEOUT;
 		}
-		if (events & (NVREG_IRQ_TX_ERR)) {
+		if (unlikely(events & (NVREG_IRQ_TX_ERR))) {
 			dprintk(KERN_DEBUG "%s: received irq with events 0x%x. Probably TX fail.\n",
 				dev->name, events);
 		}
-		if (events & (NVREG_IRQ_UNKNOWN)) {
+		if (unlikely(events & (NVREG_IRQ_UNKNOWN))) {
 			printk(KERN_DEBUG "%s: received irq with unknown events 0x%x. Please report\n",
 				dev->name, events);
 		}
@@ -2822,30 +2845,7 @@ static irqreturn_t nv_nic_irq(int foo, void *data)
 			spin_unlock(&np->lock);
 			break;
 		}
-#ifdef CONFIG_FORCEDETH_NAPI
-		if (events & NVREG_IRQ_RX_ALL) {
-			netif_rx_schedule(dev);
-
-			/* Disable furthur receive irq's */
-			spin_lock(&np->lock);
-			np->irqmask &= ~NVREG_IRQ_RX_ALL;
-
-			if (np->msi_flags & NV_MSI_X_ENABLED)
-				writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask);
-			else
-				writel(np->irqmask, base + NvRegIrqMask);
-			spin_unlock(&np->lock);
-		}
-#else
-		nv_rx_process(dev, dev->weight);
-		if (nv_alloc_rx(dev)) {
-			spin_lock(&np->lock);
-			if (!np->in_shutdown)
-				mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
-			spin_unlock(&np->lock);
-		}
-#endif
-		if (i > max_interrupt_work) {
+		if (unlikely(i > max_interrupt_work)) {
 			spin_lock(&np->lock);
 			/* disable interrupts on the nic */
 			if (!(np->msi_flags & NV_MSI_X_ENABLED))
@@ -2869,6 +2869,13 @@ static irqreturn_t nv_nic_irq(int foo, void *data)
 	return IRQ_RETVAL(i);
 }
 
+#define TX_WORK_PER_LOOP 64
+#define RX_WORK_PER_LOOP 64
+/**
+ * All _optimized functions are used to help increase performance
+ * (reduce CPU and increase throughput). They use descripter version 3,
+ * compiler directives, and reduce memory accesses.
+ */
 static irqreturn_t nv_nic_irq_optimized(int foo, void *data)
 {
 	struct net_device *dev = (struct net_device *) data;
@@ -2887,7 +2894,6 @@ static irqreturn_t nv_nic_irq_optimized(int foo, void *data)
 			events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQSTAT_MASK;
 			writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus);
 		}
-		pci_push(base);
 		dprintk(KERN_DEBUG "%s: irq: %08x\n", dev->name, events);
 		if (!(events & np->irqmask))
 			break;
@@ -2896,22 +2902,46 @@ static irqreturn_t nv_nic_irq_optimized(int foo, void *data)
 		nv_tx_done_optimized(dev);
 		spin_unlock(&np->lock);
 
-		if (events & NVREG_IRQ_LINK) {
+#ifdef CONFIG_FORCEDETH_NAPI
+		if (events & NVREG_IRQ_RX_ALL) {
+			netif_rx_schedule(dev);
+
+			/* Disable furthur receive irq's */
+			spin_lock(&np->lock);
+			np->irqmask &= ~NVREG_IRQ_RX_ALL;
+
+			if (np->msi_flags & NV_MSI_X_ENABLED)
+				writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask);
+			else
+				writel(np->irqmask, base + NvRegIrqMask);
+			spin_unlock(&np->lock);
+		}
+#else
+		if (nv_rx_process_optimized(dev, dev->weight)) {
+			if (unlikely(nv_alloc_rx_optimized(dev))) {
+				spin_lock(&np->lock);
+				if (!np->in_shutdown)
+					mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
+				spin_unlock(&np->lock);
+			}
+		}
+#endif
+		if (unlikely(events & NVREG_IRQ_LINK)) {
 			spin_lock(&np->lock);
 			nv_link_irq(dev);
 			spin_unlock(&np->lock);
 		}
-		if (np->need_linktimer && time_after(jiffies, np->link_timeout)) {
+		if (unlikely(np->need_linktimer && time_after(jiffies, np->link_timeout))) {
 			spin_lock(&np->lock);
 			nv_linkchange(dev);
 			spin_unlock(&np->lock);
 			np->link_timeout = jiffies + LINK_TIMEOUT;
 		}
-		if (events & (NVREG_IRQ_TX_ERR)) {
+		if (unlikely(events & (NVREG_IRQ_TX_ERR))) {
 			dprintk(KERN_DEBUG "%s: received irq with events 0x%x. Probably TX fail.\n",
 				dev->name, events);
 		}
-		if (events & (NVREG_IRQ_UNKNOWN)) {
+		if (unlikely(events & (NVREG_IRQ_UNKNOWN))) {
 			printk(KERN_DEBUG "%s: received irq with unknown events 0x%x. Please report\n",
 				dev->name, events);
 		}
@@ -2933,30 +2963,7 @@ static irqreturn_t nv_nic_irq_optimized(int foo, void *data)
 			break;
 		}
 
-#ifdef CONFIG_FORCEDETH_NAPI
-		if (events & NVREG_IRQ_RX_ALL) {
-			netif_rx_schedule(dev);
-
-			/* Disable furthur receive irq's */
-			spin_lock(&np->lock);
-			np->irqmask &= ~NVREG_IRQ_RX_ALL;
-
-			if (np->msi_flags & NV_MSI_X_ENABLED)
-				writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask);
-			else
-				writel(np->irqmask, base + NvRegIrqMask);
-			spin_unlock(&np->lock);
-		}
-#else
-		nv_rx_process_optimized(dev, dev->weight);
-		if (nv_alloc_rx_optimized(dev)) {
-			spin_lock(&np->lock);
-			if (!np->in_shutdown)
-				mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
-			spin_unlock(&np->lock);
-		}
-#endif
-		if (i > max_interrupt_work) {
+		if (unlikely(i > max_interrupt_work)) {
 			spin_lock(&np->lock);
 			/* disable interrupts on the nic */
 			if (!(np->msi_flags & NV_MSI_X_ENABLED))
@@ -2994,7 +3001,6 @@ static irqreturn_t nv_nic_irq_tx(int foo, void *data)
 	for (i=0; ; i++) {
 		events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_TX_ALL;
 		writel(NVREG_IRQ_TX_ALL, base + NvRegMSIXIrqStatus);
-		pci_push(base);
 		dprintk(KERN_DEBUG "%s: tx irq: %08x\n", dev->name, events);
 		if (!(events & np->irqmask))
 			break;
@@ -3003,11 +3009,11 @@ static irqreturn_t nv_nic_irq_tx(int foo, void *data)
 		nv_tx_done_optimized(dev);
 		spin_unlock_irqrestore(&np->lock, flags);
 
-		if (events & (NVREG_IRQ_TX_ERR)) {
+		if (unlikely(events & (NVREG_IRQ_TX_ERR))) {
 			dprintk(KERN_DEBUG "%s: received irq with events 0x%x. Probably TX fail.\n",
 				dev->name, events);
 		}
-		if (i > max_interrupt_work) {
+		if (unlikely(i > max_interrupt_work)) {
 			spin_lock_irqsave(&np->lock, flags);
 			/* disable interrupts on the nic */
 			writel(NVREG_IRQ_TX_ALL, base + NvRegIrqMask);
@@ -3105,20 +3111,20 @@ static irqreturn_t nv_nic_irq_rx(int foo, void *data)
 	for (i=0; ; i++) {
 		events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_RX_ALL;
 		writel(NVREG_IRQ_RX_ALL, base + NvRegMSIXIrqStatus);
-		pci_push(base);
 		dprintk(KERN_DEBUG "%s: rx irq: %08x\n", dev->name, events);
 		if (!(events & np->irqmask))
 			break;
 
-		nv_rx_process_optimized(dev, dev->weight);
-		if (nv_alloc_rx_optimized(dev)) {
-			spin_lock_irqsave(&np->lock, flags);
-			if (!np->in_shutdown)
-				mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
-			spin_unlock_irqrestore(&np->lock, flags);
+		if (nv_rx_process_optimized(dev, dev->weight)) {
+			if (unlikely(nv_alloc_rx_optimized(dev))) {
+				spin_lock_irqsave(&np->lock, flags);
+				if (!np->in_shutdown)
+					mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
+				spin_unlock_irqrestore(&np->lock, flags);
+			}
 		}
 
-		if (i > max_interrupt_work) {
+		if (unlikely(i > max_interrupt_work)) {
 			spin_lock_irqsave(&np->lock, flags);
 			/* disable interrupts on the nic */
 			writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask);
@@ -3153,7 +3159,6 @@ static irqreturn_t nv_nic_irq_other(int foo, void *data)
 	for (i=0; ; i++) {
 		events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_OTHER;
 		writel(NVREG_IRQ_OTHER, base + NvRegMSIXIrqStatus);
-		pci_push(base);
 		dprintk(KERN_DEBUG "%s: irq: %08x\n", dev->name, events);
 		if (!(events & np->irqmask))
 			break;
@@ -3187,7 +3192,7 @@ static irqreturn_t nv_nic_irq_other(int foo, void *data)
 			printk(KERN_DEBUG "%s: received irq with unknown events 0x%x. Please report\n",
 				dev->name, events);
 		}
-		if (i > max_interrupt_work) {
+		if (unlikely(i > max_interrupt_work)) {
 			spin_lock_irqsave(&np->lock, flags);
 			/* disable interrupts on the nic */
 			writel(NVREG_IRQ_OTHER, base + NvRegIrqMask);
@@ -4969,7 +4974,7 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
 #ifdef CONFIG_NET_POLL_CONTROLLER
 	dev->poll_controller = nv_poll_controller;
 #endif
-	dev->weight = 64;
+	dev->weight = RX_WORK_PER_LOOP;
 #ifdef CONFIG_FORCEDETH_NAPI
 	dev->poll = nv_napi_poll;
 #endif
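Editor's note: the CONFIG_FORCEDETH_NAPI branches above follow the classic interrupt-mitigation shape of the pre-2.6.24 per-netdevice NAPI interface: the handler masks further RX interrupts and calls netif_rx_schedule(), and the dev->poll routine drains at most dev->weight packets per pass (here RX_WORK_PER_LOOP, 64, per the last hunk) before re-arming the interrupt once the ring is empty. A user-space control-flow sketch of that pattern follows; the counter, flag, and helper names are invented stand-ins for the hardware accesses, not driver code:

/* napi_sketch.c - control flow of the netif_rx_schedule()/dev->poll
 * interrupt-mitigation pattern. The "device" is just a counter. */
#include <stdio.h>
#include <stdbool.h>

#define RX_WORK_PER_LOOP 64  /* budget per poll pass, as in the patch */

static int  rx_pending;           /* packets waiting in the "ring" */
static bool rx_irq_enabled = true;

static void rx_interrupt(void)
{
	/* Mirrors the patch: mask further RX interrupts and defer
	 * the packet work to the poll loop. */
	rx_irq_enabled = false;   /* np->irqmask &= ~NVREG_IRQ_RX_ALL */
	printf("irq: rx masked, poll scheduled\n"); /* netif_rx_schedule() */
}

static void poll(void)
{
	int done = 0;

	/* Drain at most one budget's worth of packets per pass. */
	while (rx_pending > 0 && done < RX_WORK_PER_LOOP) {
		rx_pending--;
		done++;
	}
	printf("poll: processed %d, %d left\n", done, rx_pending);

	/* Only re-arm the interrupt once the ring is empty. */
	if (rx_pending == 0)
		rx_irq_enabled = true;
}

int main(void)
{
	rx_pending = 150;         /* a burst of packets arrives */
	rx_interrupt();
	while (!rx_irq_enabled)   /* the softirq would keep repolling */
		poll();
	return 0;
}

Under a packet flood this design keeps the NIC in polling mode instead of taking an interrupt per packet, which is why the handler disables the RX sources rather than processing the ring inline.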