author     Stephen Hemminger <shemminger@linux-foundation.org>  2007-10-03 19:41:36 -0400
committer  David S. Miller <davem@sunset.davemloft.net>         2007-10-10 19:47:45 -0400
commit     bea3348eef27e6044b6161fd04c3152215f96411
tree       f0990b263e5ce42505d290a4c346fe990bcd4c33 /drivers/net/sungem.c
parent     dde4e47e8fe333a5649a3fa0e7db1fa7c08d6158
[NET]: Make NAPI polling independent of struct net_device objects.
Several devices have multiple independent RX queues per net
device, and some have a single interrupt doorbell for several
queues.
In either case, it's easier to support layouts like that if the
structure representing the poll is independent from the net
device itself.
The signature of the ->poll() callback goes from:
int foo_poll(struct net_device *dev, int *budget)
to
int foo_poll(struct napi_struct *napi, int budget)
The callback returns to the caller the number of RX packets
processed (or the number of "NAPI credits" consumed, if you want
to be abstract). The callee no longer has to fiddle with
dev->quota, *budget, etc., because all of that accounting is
handled by the caller upon return.
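For a driver not touched by this patch, the converted handler reduces to
roughly the following sketch. It is illustrative only: struct foo_priv,
foo_clean_rx() and foo_enable_irq() are hypothetical names, not part of
this patch or of any real driver.

/* Hypothetical private structure; the napi_struct is embedded in it,
 * as described in the next paragraph.
 */
struct foo_priv {
        struct napi_struct napi;
        struct net_device *dev;
        /* ... rings, registers, locks ... */
};

static int foo_poll(struct napi_struct *napi, int budget)
{
        /* Recover the enclosing private structure. */
        struct foo_priv *fp = container_of(napi, struct foo_priv, napi);
        int work_done;

        /* foo_clean_rx() stands in for the driver's RX ring service
         * routine; it must handle at most 'budget' packets.
         */
        work_done = foo_clean_rx(fp, budget);

        if (work_done < budget) {
                /* All pending work handled: leave polled mode and let
                 * the device interrupt us again.
                 */
                netif_rx_complete(fp->dev, napi);
                foo_enable_irq(fp);
        }

        /* The caller does all quota/budget accounting from this value. */
        return work_done;
}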
The napi_struct is to be embedded in the device driver private data
structures.
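At probe time the embedded instance is registered with netif_napi_add().
A sketch reusing the hypothetical foo_priv above (foo_init() is likewise
made up; the weight of 64 matches what the sungem conversion below uses):

static int foo_init(struct net_device *dev)
{
        struct foo_priv *fp = netdev_priv(dev);

        fp->dev = dev;
        /* Replaces the old dev->poll and dev->weight assignments. */
        netif_napi_add(dev, &fp->napi, foo_poll, 64);
        return 0;
}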
Furthermore, it is the driver's responsibility to disable all NAPI
instances in its ->stop() device close handler. Since the
napi_struct is privatized into the driver's private data structures,
only the driver knows how to get at all of the napi_struct instances
it may have per-device.
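Concretely, the open/close pairing now looks something like this sketch
(again with the hypothetical foo_* names; the gem_open()/gem_close()
hunks below do the same thing for sungem):

static int foo_open(struct net_device *dev)
{
        struct foo_priv *fp = netdev_priv(dev);

        napi_enable(&fp->napi);         /* before interrupts can fire */
        /* ... bring up hardware, request IRQ ... */
        return 0;
}

static int foo_stop(struct net_device *dev)
{
        struct foo_priv *fp = netdev_priv(dev);

        /* dev_close() no longer disables polling for us; quiesce
         * every napi_struct this driver owns before tearing down
         * the rings.
         */
        napi_disable(&fp->napi);
        /* ... stop hardware, free IRQ ... */
        return 0;
}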
With lots of help and suggestions from Rusty Russell, Roland Dreier,
Michael Chan, Jeff Garzik, and Jamal Hadi Salim.
Bug fixes from Thomas Graf, Roland Dreier, Peter Zijlstra,
Joseph Fannin, Scott Wood, Hans J. Koch, and Michael Chan.
[ Ported to current tree and all drivers converted. Integrated
Stephen's follow-on kerneldoc additions, and restored poll_list
handling to the old style to fix mutual exclusion issues. -DaveM ]
Signed-off-by: Stephen Hemminger <shemminger@linux-foundation.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/net/sungem.c')
-rw-r--r--  drivers/net/sungem.c  52
1 file changed, 24 insertions(+), 28 deletions(-)
diff --git a/drivers/net/sungem.c b/drivers/net/sungem.c
index 432803855034..bf821e96f7b2 100644
--- a/drivers/net/sungem.c
+++ b/drivers/net/sungem.c
@@ -19,7 +19,7 @@
  *
  * gem_change_mtu() and gem_set_multicast() are called with a read_lock()
  * help by net/core/dev.c, thus they can't schedule. That means they can't
- * call netif_poll_disable() neither, thus force gem_poll() to keep a spinlock
+ * call napi_disable() neither, thus force gem_poll() to keep a spinlock
  * where it could have been dropped. change_mtu especially would love also to
  * be able to msleep instead of horrid locked delays when resetting the HW,
  * but that read_lock() makes it impossible, unless I defer it's action to
@@ -878,19 +878,20 @@ static int gem_rx(struct gem *gp, int work_to_do)
         return work_done;
 }
 
-static int gem_poll(struct net_device *dev, int *budget)
+static int gem_poll(struct napi_struct *napi, int budget)
 {
-        struct gem *gp = dev->priv;
+        struct gem *gp = container_of(napi, struct gem, napi);
+        struct net_device *dev = gp->dev;
         unsigned long flags;
+        int work_done;
 
         /*
          * NAPI locking nightmare: See comment at head of driver
          */
         spin_lock_irqsave(&gp->lock, flags);
 
+        work_done = 0;
         do {
-                int work_to_do, work_done;
-
                 /* Handle anomalies */
                 if (gp->status & GREG_STAT_ABNORMAL) {
                         if (gem_abnormal_irq(dev, gp, gp->status))
@@ -906,29 +907,25 @@ static int gem_poll(struct net_device *dev, int *budget)
 
                 /* Run RX thread. We don't use any locking here,
                  * code willing to do bad things - like cleaning the
-                 * rx ring - must call netif_poll_disable(), which
+                 * rx ring - must call napi_disable(), which
                  * schedule_timeout()'s if polling is already disabled.
                  */
-                work_to_do = min(*budget, dev->quota);
-
-                work_done = gem_rx(gp, work_to_do);
-
-                *budget -= work_done;
-                dev->quota -= work_done;
+                work_done += gem_rx(gp, budget);
 
-                if (work_done >= work_to_do)
-                        return 1;
+                if (work_done >= budget)
+                        return work_done;
 
                 spin_lock_irqsave(&gp->lock, flags);
 
                 gp->status = readl(gp->regs + GREG_STAT);
         } while (gp->status & GREG_STAT_NAPI);
 
-        __netif_rx_complete(dev);
+        __netif_rx_complete(dev, napi);
         gem_enable_ints(gp);
 
         spin_unlock_irqrestore(&gp->lock, flags);
-        return 0;
+
+        return work_done;
 }
 
 static irqreturn_t gem_interrupt(int irq, void *dev_id)
@@ -946,17 +943,17 @@ static irqreturn_t gem_interrupt(int irq, void *dev_id)
 
         spin_lock_irqsave(&gp->lock, flags);
 
-        if (netif_rx_schedule_prep(dev)) {
+        if (netif_rx_schedule_prep(dev, &gp->napi)) {
                 u32 gem_status = readl(gp->regs + GREG_STAT);
 
                 if (gem_status == 0) {
-                        netif_poll_enable(dev);
+                        napi_enable(&gp->napi);
                         spin_unlock_irqrestore(&gp->lock, flags);
                         return IRQ_NONE;
                 }
                 gp->status = gem_status;
                 gem_disable_ints(gp);
-                __netif_rx_schedule(dev);
+                __netif_rx_schedule(dev, &gp->napi);
         }
 
         spin_unlock_irqrestore(&gp->lock, flags);
@@ -2284,7 +2281,7 @@ static void gem_reset_task(struct work_struct *work)
 
         mutex_lock(&gp->pm_mutex);
 
-        netif_poll_disable(gp->dev);
+        napi_disable(&gp->napi);
 
         spin_lock_irq(&gp->lock);
         spin_lock(&gp->tx_lock);
@@ -2307,7 +2304,7 @@ static void gem_reset_task(struct work_struct *work)
         spin_unlock(&gp->tx_lock);
         spin_unlock_irq(&gp->lock);
 
-        netif_poll_enable(gp->dev);
+        napi_enable(&gp->napi);
 
         mutex_unlock(&gp->pm_mutex);
 }
@@ -2324,6 +2321,8 @@ static int gem_open(struct net_device *dev)
         if (!gp->asleep)
                 rc = gem_do_start(dev);
         gp->opened = (rc == 0);
+        if (gp->opened)
+                napi_enable(&gp->napi);
 
         mutex_unlock(&gp->pm_mutex);
 
@@ -2334,9 +2333,7 @@ static int gem_close(struct net_device *dev)
 {
         struct gem *gp = dev->priv;
 
-        /* Note: we don't need to call netif_poll_disable() here because
-         * our caller (dev_close) already did it for us
-         */
+        napi_disable(&gp->napi);
 
         mutex_lock(&gp->pm_mutex);
 
@@ -2358,7 +2355,7 @@ static int gem_suspend(struct pci_dev *pdev, pm_message_t state)
 
         mutex_lock(&gp->pm_mutex);
 
-        netif_poll_disable(dev);
+        napi_disable(&gp->napi);
 
         printk(KERN_INFO "%s: suspending, WakeOnLan %s\n",
                dev->name,
@@ -2482,7 +2479,7 @@ static int gem_resume(struct pci_dev *pdev)
         spin_unlock(&gp->tx_lock);
         spin_unlock_irqrestore(&gp->lock, flags);
 
-        netif_poll_enable(dev);
+        napi_enable(&gp->napi);
 
         mutex_unlock(&gp->pm_mutex);
 
@@ -3121,8 +3118,7 @@ static int __devinit gem_init_one(struct pci_dev *pdev,
         dev->get_stats = gem_get_stats;
         dev->set_multicast_list = gem_set_multicast;
         dev->do_ioctl = gem_ioctl;
-        dev->poll = gem_poll;
-        dev->weight = 64;
+        netif_napi_add(dev, &gp->napi, gem_poll, 64);
         dev->ethtool_ops = &gem_ethtool_ops;
         dev->tx_timeout = gem_tx_timeout;
         dev->watchdog_timeo = 5 * HZ;