Diffstat (limited to 'drivers/net/forcedeth.c')
 drivers/net/forcedeth.c | 69 ++++++++++++++++++++++++++++++++++++++-------------------------------
 1 file changed, 38 insertions(+), 31 deletions(-)
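
This commit converts forcedeth from the old per-device NAPI hooks (dev->poll, dev->weight, dev->quota) to the napi_struct interface: the driver embeds a struct napi_struct in its private data, registers it with netif_napi_add(), and the poll handler receives the napi_struct and a budget directly. A minimal sketch of a new-style poll handler follows, assuming the 2.6.24-era API; my_priv, my_rx_process, and my_enable_rx_irq are hypothetical names, not from forcedeth:

	#include <linux/netdevice.h>

	struct my_priv {
		struct net_device *dev;		/* back-pointer, as this patch adds */
		struct napi_struct napi;	/* embedded, as this patch adds */
	};

	static int my_rx_process(struct net_device *dev, int limit);	/* hypothetical */
	static void my_enable_rx_irq(struct my_priv *priv);		/* hypothetical */

	/* New-style poll: handle at most 'budget' packets and return the
	 * count; complete NAPI and re-arm the interrupt only when the full
	 * budget was not used. */
	static int my_poll(struct napi_struct *napi, int budget)
	{
		struct my_priv *priv = container_of(napi, struct my_priv, napi);
		int done = my_rx_process(priv->dev, budget);

		if (done < budget) {
			netif_rx_complete(priv->dev, napi);	/* leave polled mode */
			my_enable_rx_irq(priv);			/* allow the next IRQ */
		}
		return done;
	}
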
diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c
index 1938d6dfc863..24c1294614f2 100644
--- a/drivers/net/forcedeth.c
+++ b/drivers/net/forcedeth.c
@@ -159,6 +159,8 @@
 #define dprintk(x...) do { } while (0)
 #endif
 
+#define TX_WORK_PER_LOOP 64
+#define RX_WORK_PER_LOOP 64
 
 /*
  * Hardware access:
@@ -745,6 +747,9 @@ struct nv_skb_map {
 struct fe_priv {
 	spinlock_t lock;
 
+	struct net_device *dev;
+	struct napi_struct napi;
+
 	/* General data:
 	 * Locking: spin_lock(&np->lock); */
 	struct net_device_stats stats;
@@ -1586,9 +1591,10 @@ static int nv_alloc_rx_optimized(struct net_device *dev)
 static void nv_do_rx_refill(unsigned long data)
 {
 	struct net_device *dev = (struct net_device *) data;
+	struct fe_priv *np = netdev_priv(dev);
 
 	/* Just reschedule NAPI rx processing */
-	netif_rx_schedule(dev);
+	netif_rx_schedule(dev, &np->napi);
 }
 #else
 static void nv_do_rx_refill(unsigned long data)
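
In the new API, netif_rx_schedule() identifies which napi_struct to run, so a call site that only holds a net_device must first recover the private structure embedding it, as nv_do_rx_refill does above via netdev_priv(). A sketch of that timer-callback pattern, with the hypothetical my_priv from the note above standing in for fe_priv:

	/* Timer callback recovering the napi_struct it must schedule
	 * (sketch, assuming the 2.6.24-era two-argument netif_rx_schedule). */
	static void my_refill_timer(unsigned long data)
	{
		struct net_device *dev = (struct net_device *)data;
		struct my_priv *priv = netdev_priv(dev);	/* priv embeds napi */

		netif_rx_schedule(dev, &priv->napi);		/* rerun RX polling */
	}
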
@@ -2997,7 +3003,7 @@ static irqreturn_t nv_nic_irq(int foo, void *data)
 
 #ifdef CONFIG_FORCEDETH_NAPI
 		if (events & NVREG_IRQ_RX_ALL) {
-			netif_rx_schedule(dev);
+			netif_rx_schedule(dev, &np->napi);
 
 			/* Disable furthur receive irq's */
 			spin_lock(&np->lock);
@@ -3010,7 +3016,7 @@ static irqreturn_t nv_nic_irq(int foo, void *data)
 			spin_unlock(&np->lock);
 		}
 #else
-		if (nv_rx_process(dev, dev->weight)) {
+		if (nv_rx_process(dev, RX_WORK_PER_LOOP)) {
 			if (unlikely(nv_alloc_rx(dev))) {
 				spin_lock(&np->lock);
 				if (!np->in_shutdown)
@@ -3079,8 +3085,6 @@ static irqreturn_t nv_nic_irq(int foo, void *data)
 	return IRQ_RETVAL(i);
 }
 
-#define TX_WORK_PER_LOOP 64
-#define RX_WORK_PER_LOOP 64
 /**
  * All _optimized functions are used to help increase performance
  * (reduce CPU and increase throughput). They use descripter version 3,
@@ -3114,7 +3118,7 @@ static irqreturn_t nv_nic_irq_optimized(int foo, void *data)
 
 #ifdef CONFIG_FORCEDETH_NAPI
 		if (events & NVREG_IRQ_RX_ALL) {
-			netif_rx_schedule(dev);
+			netif_rx_schedule(dev, &np->napi);
 
 			/* Disable furthur receive irq's */
 			spin_lock(&np->lock);
@@ -3127,7 +3131,7 @@ static irqreturn_t nv_nic_irq_optimized(int foo, void *data)
 			spin_unlock(&np->lock);
 		}
 #else
-		if (nv_rx_process_optimized(dev, dev->weight)) {
+		if (nv_rx_process_optimized(dev, RX_WORK_PER_LOOP)) {
 			if (unlikely(nv_alloc_rx_optimized(dev))) {
 				spin_lock(&np->lock);
 				if (!np->in_shutdown)
@@ -3245,19 +3249,19 @@ static irqreturn_t nv_nic_irq_tx(int foo, void *data)
 }
 
 #ifdef CONFIG_FORCEDETH_NAPI
-static int nv_napi_poll(struct net_device *dev, int *budget)
+static int nv_napi_poll(struct napi_struct *napi, int budget)
 {
-	int pkts, limit = min(*budget, dev->quota);
-	struct fe_priv *np = netdev_priv(dev);
+	struct fe_priv *np = container_of(napi, struct fe_priv, napi);
+	struct net_device *dev = np->dev;
 	u8 __iomem *base = get_hwbase(dev);
 	unsigned long flags;
-	int retcode;
+	int pkts, retcode;
 
 	if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
-		pkts = nv_rx_process(dev, limit);
+		pkts = nv_rx_process(dev, budget);
 		retcode = nv_alloc_rx(dev);
 	} else {
-		pkts = nv_rx_process_optimized(dev, limit);
+		pkts = nv_rx_process_optimized(dev, budget);
 		retcode = nv_alloc_rx_optimized(dev);
 	}
 
3263 3267
@@ -3268,13 +3272,12 @@ static int nv_napi_poll(struct net_device *dev, int *budget)
 		spin_unlock_irqrestore(&np->lock, flags);
 	}
 
-	if (pkts < limit) {
-		/* all done, no more packets present */
-		netif_rx_complete(dev);
-
+	if (pkts < budget) {
 		/* re-enable receive interrupts */
 		spin_lock_irqsave(&np->lock, flags);
 
+		__netif_rx_complete(dev, napi);
+
 		np->irqmask |= NVREG_IRQ_RX_ALL;
 		if (np->msi_flags & NV_MSI_X_ENABLED)
 			writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask);
@@ -3282,13 +3285,8 @@ static int nv_napi_poll(struct net_device *dev, int *budget)
 			writel(np->irqmask, base + NvRegIrqMask);
 
 		spin_unlock_irqrestore(&np->lock, flags);
-		return 0;
-	} else {
-		/* used up our quantum, so reschedule */
-		dev->quota -= pkts;
-		*budget -= pkts;
-		return 1;
 	}
+	return pkts;
 }
 #endif
 
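The poll-handler hunks above also change the completion protocol: the old handler decremented dev->quota and *budget itself and returned 1 to be polled again or 0 when finished, while the new one returns the number of packets processed and the core compares that against the budget. Note that __netif_rx_complete() now runs inside np->lock, before the RX interrupt mask is rewritten, so an interrupt firing in between cannot re-schedule a NAPI context still marked as scheduled. A minimal sketch of that completion tail, assuming the 2.6.24-era API; my_priv and my_unmask_rx are hypothetical:

	#include <linux/netdevice.h>
	#include <linux/spinlock.h>

	struct my_priv {
		spinlock_t lock;
		struct net_device *dev;
		struct napi_struct napi;
	};

	static void my_unmask_rx(struct my_priv *priv);	/* hypothetical re-arm */

	/* Complete NAPI and re-arm the device IRQ under the same lock the
	 * interrupt handler takes before calling netif_rx_schedule(). */
	static int my_poll_tail(struct my_priv *priv, int pkts, int budget)
	{
		unsigned long flags;

		if (pkts < budget) {
			spin_lock_irqsave(&priv->lock, flags);
			__netif_rx_complete(priv->dev, &priv->napi); /* clear SCHED */
			my_unmask_rx(priv);		/* rewrite the irq mask */
			spin_unlock_irqrestore(&priv->lock, flags);
		}
		return pkts;	/* the core treats pkts < budget as "done" */
	}
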
@@ -3296,6 +3294,7 @@ static int nv_napi_poll(struct net_device *dev, int *budget)
 static irqreturn_t nv_nic_irq_rx(int foo, void *data)
 {
 	struct net_device *dev = (struct net_device *) data;
+	struct fe_priv *np = netdev_priv(dev);
 	u8 __iomem *base = get_hwbase(dev);
 	u32 events;
 
@@ -3303,7 +3302,7 @@ static irqreturn_t nv_nic_irq_rx(int foo, void *data)
 	writel(NVREG_IRQ_RX_ALL, base + NvRegMSIXIrqStatus);
 
 	if (events) {
-		netif_rx_schedule(dev);
+		netif_rx_schedule(dev, &np->napi);
 		/* disable receive interrupts on the nic */
 		writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask);
 		pci_push(base);
@@ -3329,7 +3328,7 @@ static irqreturn_t nv_nic_irq_rx(int foo, void *data)
 		if (!(events & np->irqmask))
 			break;
 
-		if (nv_rx_process_optimized(dev, dev->weight)) {
+		if (nv_rx_process_optimized(dev, RX_WORK_PER_LOOP)) {
 			if (unlikely(nv_alloc_rx_optimized(dev))) {
 				spin_lock_irqsave(&np->lock, flags);
 				if (!np->in_shutdown)
@@ -4620,7 +4619,9 @@ static void nv_self_test(struct net_device *dev, struct ethtool_test *test, u64
 	if (test->flags & ETH_TEST_FL_OFFLINE) {
 		if (netif_running(dev)) {
 			netif_stop_queue(dev);
-			netif_poll_disable(dev);
+#ifdef CONFIG_FORCEDETH_NAPI
+			napi_disable(&np->napi);
+#endif
 			netif_tx_lock_bh(dev);
 			spin_lock_irq(&np->lock);
 			nv_disable_hw_interrupts(dev, np->irqmask);
@@ -4679,7 +4680,9 @@ static void nv_self_test(struct net_device *dev, struct ethtool_test *test, u64
 			nv_start_rx(dev);
 			nv_start_tx(dev);
 			netif_start_queue(dev);
-			netif_poll_enable(dev);
+#ifdef CONFIG_FORCEDETH_NAPI
+			napi_enable(&np->napi);
+#endif
 			nv_enable_hw_interrupts(dev, np->irqmask);
 		}
 	}
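
In the self-test hunks above, napi_disable() does more than clear a flag: in the 2.6.24-era API it blocks until any poll in progress has finished, so once it returns the handler is quiesced and the offline test runs with RX processing stopped; napi_enable() then permits scheduling again. The calls are wrapped in CONFIG_FORCEDETH_NAPI because the napi field only exists when that option is set. A sketch of the quiesce/resume bracket, mirroring those hunks:

	/* Quiesce RX polling around an operation that needs the NIC idle
	 * (sketch of the pattern this patch applies in nv_self_test). */
	#ifdef CONFIG_FORCEDETH_NAPI
		napi_disable(&np->napi);	/* waits until any running poll returns */
	#endif
		/* ... offline register/loopback tests run here ... */
	#ifdef CONFIG_FORCEDETH_NAPI
		napi_enable(&np->napi);		/* allow netif_rx_schedule() again */
	#endif
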
@@ -4911,7 +4914,9 @@ static int nv_open(struct net_device *dev)
 	nv_start_rx(dev);
 	nv_start_tx(dev);
 	netif_start_queue(dev);
-	netif_poll_enable(dev);
+#ifdef CONFIG_FORCEDETH_NAPI
+	napi_enable(&np->napi);
+#endif
 
 	if (ret) {
 		netif_carrier_on(dev);
@@ -4942,7 +4947,9 @@ static int nv_close(struct net_device *dev)
 	spin_lock_irq(&np->lock);
 	np->in_shutdown = 1;
 	spin_unlock_irq(&np->lock);
-	netif_poll_disable(dev);
+#ifdef CONFIG_FORCEDETH_NAPI
+	napi_disable(&np->napi);
+#endif
 	synchronize_irq(dev->irq);
 
 	del_timer_sync(&np->oom_kick);
@@ -4994,6 +5001,7 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
 		goto out;
 
 	np = netdev_priv(dev);
+	np->dev = dev;
 	np->pci_dev = pci_dev;
 	spin_lock_init(&np->lock);
 	SET_MODULE_OWNER(dev);
@@ -5155,9 +5163,8 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
 #ifdef CONFIG_NET_POLL_CONTROLLER
 	dev->poll_controller = nv_poll_controller;
 #endif
-	dev->weight = RX_WORK_PER_LOOP;
 #ifdef CONFIG_FORCEDETH_NAPI
-	dev->poll = nv_napi_poll;
+	netif_napi_add(dev, &np->napi, nv_napi_poll, RX_WORK_PER_LOOP);
 #endif
 	SET_ETHTOOL_OPS(dev, &ops);
 	dev->tx_timeout = nv_tx_timeout;
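
Registration collapses into one call: netif_napi_add() records the poll function and weight in the napi_struct, replacing the dev->weight and dev->poll assignments removed above (the weight remains RX_WORK_PER_LOOP). Its signature in the 2.6.24-era API, stated here as an assumption since it is not part of this diff:

	/* Declared in <linux/netdevice.h> in the 2.6.24-era NAPI rework. */
	void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
			    int (*poll)(struct napi_struct *, int), int weight);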