| field | value | date |
|---|---|---|
| author | Stephen Hemminger <shemminger@linux-foundation.org> | 2007-10-03 19:41:36 -0400 |
| committer | David S. Miller <davem@sunset.davemloft.net> | 2007-10-10 19:47:45 -0400 |
| commit | bea3348eef27e6044b6161fd04c3152215f96411 (patch) | |
| tree | f0990b263e5ce42505d290a4c346fe990bcd4c33 /net/core/netpoll.c | |
| parent | dde4e47e8fe333a5649a3fa0e7db1fa7c08d6158 (diff) | |
[NET]: Make NAPI polling independent of struct net_device objects.
Several devices have multiple independent RX queues per net
device, and some have a single interrupt doorbell for several
queues.
In either case, it's easier to support layouts like that if the
structure representing the poll is independent from the net
device itself.
The signature of the ->poll() callback goes from:
int foo_poll(struct net_device *dev, int *budget)
to
int foo_poll(struct napi_struct *napi, int budget)
The caller is returned the number of RX packets processed (or
the number of "NAPI credits" consumed, if you want to be
abstract). The callee no longer has to fiddle with dev->quota,
*budget, etc., because all of that accounting is handled by the
caller upon return.
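
For illustration, here is a minimal sketch of what a new-style callback might look like on the driver side. The foo_priv layout and the foo_* helpers are hypothetical, and the completion call is written as netif_rx_complete(), its form in kernels of this vintage:

```c
#include <linux/netdevice.h>

/* Sketch only: foo_* names are hypothetical driver code. */
struct foo_priv {
	struct net_device *netdev;
	struct napi_struct napi;	/* embedded; one per RX queue if need be */
	/* ... ring state ... */
};

static int foo_poll(struct napi_struct *napi, int budget)
{
	struct foo_priv *priv = container_of(napi, struct foo_priv, napi);
	int work_done = 0;

	/* Consume at most @budget packets; no dev->quota bookkeeping here. */
	while (work_done < budget && foo_rx_pending(priv)) {
		foo_handle_one_rx(priv);	/* hypothetical helpers */
		work_done++;
	}

	/* Ran out of work before the budget: stop polling, re-arm the IRQ. */
	if (work_done < budget) {
		netif_rx_complete(priv->netdev, napi);
		foo_enable_rx_irq(priv);	/* hypothetical */
	}

	/* The return value is the number of "NAPI credits" consumed;
	 * the caller does all of the budget accounting with it. */
	return work_done;
}
```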
The napi_struct is to be embedded in the device driver private data
structures.
Furthermore, it is the driver's responsibility to disable all NAPI
instances in its ->stop() device close handler. Since the
napi_struct is privatized into the driver's private data structures,
only the driver knows how to get at all of the napi_struct instances
it may have per-device.
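
Continuing the hypothetical foo driver sketch from above, that responsibility might look like this for a single embedded instance (a multi-queue driver would loop over an array of napi_structs); netif_napi_add(), napi_enable() and napi_disable() are the helpers this commit introduces:

```c
#include <linux/netdevice.h>

/* Sketch only: foo_* names are hypothetical driver code. */

/* Done once at device setup: hook the embedded instance onto
 * dev->napi_list with its poll callback and weight. */
static void foo_setup_napi(struct net_device *dev)
{
	struct foo_priv *priv = netdev_priv(dev);

	netif_napi_add(dev, &priv->napi, foo_poll, 64);
}

static int foo_open(struct net_device *dev)
{
	struct foo_priv *priv = netdev_priv(dev);

	napi_enable(&priv->napi);
	/* ... bring up hardware, request the IRQ ... */
	return 0;
}

static int foo_stop(struct net_device *dev)
{
	struct foo_priv *priv = netdev_priv(dev);

	/* The core can no longer find the driver's NAPI instances
	 * through net_device, so the driver quiesces each one here. */
	napi_disable(&priv->napi);
	/* ... free the IRQ, stop hardware ... */
	return 0;
}
```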
With lots of help and suggestions from Rusty Russell, Roland Dreier,
Michael Chan, Jeff Garzik, and Jamal Hadi Salim.
Bug fixes from Thomas Graf, Roland Dreier, Peter Zijlstra,
Joseph Fannin, Scott Wood, Hans J. Koch, and Michael Chan.
[ Ported to current tree and all drivers converted. Integrated
Stephen's follow-on kerneldoc additions, and restored poll_list
handling to the old style to fix mutual exclusion issues. -DaveM ]
Signed-off-by: Stephen Hemminger <shemminger@linux-foundation.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/core/netpoll.c')

-rw-r--r--  net/core/netpoll.c  39
1 file changed, 25 insertions, 14 deletions
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index de1b26aa5720..abe6e3a4cc44 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -119,19 +119,22 @@ static __sum16 checksum_udp(struct sk_buff *skb, struct udphdr *uh,
 static void poll_napi(struct netpoll *np)
 {
 	struct netpoll_info *npinfo = np->dev->npinfo;
+	struct napi_struct *napi;
 	int budget = 16;
 
-	if (test_bit(__LINK_STATE_RX_SCHED, &np->dev->state) &&
-	    npinfo->poll_owner != smp_processor_id() &&
-	    spin_trylock(&npinfo->poll_lock)) {
-		npinfo->rx_flags |= NETPOLL_RX_DROP;
-		atomic_inc(&trapped);
+	list_for_each_entry(napi, &np->dev->napi_list, dev_list) {
+		if (test_bit(NAPI_STATE_SCHED, &napi->state) &&
+		    napi->poll_owner != smp_processor_id() &&
+		    spin_trylock(&napi->poll_lock)) {
+			npinfo->rx_flags |= NETPOLL_RX_DROP;
+			atomic_inc(&trapped);
 
-		np->dev->poll(np->dev, &budget);
+			napi->poll(napi, budget);
 
-		atomic_dec(&trapped);
-		npinfo->rx_flags &= ~NETPOLL_RX_DROP;
-		spin_unlock(&npinfo->poll_lock);
+			atomic_dec(&trapped);
+			npinfo->rx_flags &= ~NETPOLL_RX_DROP;
+			spin_unlock(&napi->poll_lock);
+		}
 	}
 }
 
@@ -157,7 +160,7 @@ void netpoll_poll(struct netpoll *np)
 
 	/* Process pending work on NIC */
 	np->dev->poll_controller(np->dev);
-	if (np->dev->poll)
+	if (!list_empty(&np->dev->napi_list))
 		poll_napi(np);
 
 	service_arp_queue(np->dev->npinfo);
@@ -233,6 +236,17 @@ repeat:
 	return skb;
 }
 
+static int netpoll_owner_active(struct net_device *dev)
+{
+	struct napi_struct *napi;
+
+	list_for_each_entry(napi, &dev->napi_list, dev_list) {
+		if (napi->poll_owner == smp_processor_id())
+			return 1;
+	}
+	return 0;
+}
+
 static void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
 {
 	int status = NETDEV_TX_BUSY;
@@ -246,8 +260,7 @@ static void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
 	}
 
 	/* don't get messages out of order, and no recursion */
-	if (skb_queue_len(&npinfo->txq) == 0 &&
-	    npinfo->poll_owner != smp_processor_id()) {
+	if (skb_queue_len(&npinfo->txq) == 0 && !netpoll_owner_active(dev)) {
 		unsigned long flags;
 
 		local_irq_save(flags);
@@ -652,8 +665,6 @@ int netpoll_setup(struct netpoll *np)
 
 		npinfo->rx_flags = 0;
 		npinfo->rx_np = NULL;
-		spin_lock_init(&npinfo->poll_lock);
-		npinfo->poll_owner = -1;
 
 		spin_lock_init(&npinfo->rx_lock);
 		skb_queue_head_init(&npinfo->arp_tx);
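
For readers following the netpoll changes above: the per-device poll_lock/poll_owner pair moves from netpoll_info into each napi_struct, which is what lets poll_napi() and netpoll_owner_active() walk dev->napi_list and lock each instance individually. The sketch below approximates the napi_struct fields that code touches, reconstructed from the usage in this diff; the authoritative definition lives in include/linux/netdevice.h, and the netpoll-specific members are only compiled in with CONFIG_NETPOLL:

```c
#include <linux/list.h>
#include <linux/spinlock.h>

struct net_device;

/* Approximation based on the usage above, not a verbatim copy. */
struct napi_struct {
	struct list_head	poll_list;	/* entry on the softirq poll list */
	unsigned long		state;		/* NAPI_STATE_SCHED, ... */
	int			weight;		/* budget granted per ->poll() call */
	int			(*poll)(struct napi_struct *, int);
#ifdef CONFIG_NETPOLL
	spinlock_t		poll_lock;	/* taken by poll_napi() above */
	int			poll_owner;	/* CPU currently polling, or -1 */
	struct net_device	*dev;
	struct list_head	dev_list;	/* entry on dev->napi_list */
#endif
};
```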