91 files changed, 1758 insertions, 2375 deletions
diff --git a/Documentation/DocBook/kernel-api.tmpl b/Documentation/DocBook/kernel-api.tmpl index b886f52a9aac..e5da4f2b7c22 100644 --- a/Documentation/DocBook/kernel-api.tmpl +++ b/Documentation/DocBook/kernel-api.tmpl | |||
@@ -240,17 +240,23 @@ X!Ilib/string.c | |||
240 | <sect1><title>Driver Support</title> | 240 | <sect1><title>Driver Support</title> |
241 | !Enet/core/dev.c | 241 | !Enet/core/dev.c |
242 | !Enet/ethernet/eth.c | 242 | !Enet/ethernet/eth.c |
243 | !Enet/sched/sch_generic.c | ||
243 | !Iinclude/linux/etherdevice.h | 244 | !Iinclude/linux/etherdevice.h |
245 | !Iinclude/linux/netdevice.h | ||
246 | </sect1> | ||
247 | <sect1><title>PHY Support</title> | ||
244 | !Edrivers/net/phy/phy.c | 248 | !Edrivers/net/phy/phy.c |
245 | !Idrivers/net/phy/phy.c | 249 | !Idrivers/net/phy/phy.c |
246 | !Edrivers/net/phy/phy_device.c | 250 | !Edrivers/net/phy/phy_device.c |
247 | !Idrivers/net/phy/phy_device.c | 251 | !Idrivers/net/phy/phy_device.c |
248 | !Edrivers/net/phy/mdio_bus.c | 252 | !Edrivers/net/phy/mdio_bus.c |
249 | !Idrivers/net/phy/mdio_bus.c | 253 | !Idrivers/net/phy/mdio_bus.c |
254 | </sect1> | ||
250 | <!-- FIXME: Removed for now since no structured comments in source | 255 | <!-- FIXME: Removed for now since no structured comments in source |
256 | <sect1><title>Wireless</title> | ||
251 | X!Enet/core/wireless.c | 257 | X!Enet/core/wireless.c |
252 | --> | ||
253 | </sect1> | 258 | </sect1> |
259 | --> | ||
254 | <sect1><title>Synchronous PPP</title> | 260 | <sect1><title>Synchronous PPP</title> |
255 | !Edrivers/net/wan/syncppp.c | 261 | !Edrivers/net/wan/syncppp.c |
256 | </sect1> | 262 | </sect1> |
diff --git a/Documentation/networking/NAPI_HOWTO.txt b/Documentation/networking/NAPI_HOWTO.txt deleted file mode 100644 index 7907435a661c..000000000000 --- a/Documentation/networking/NAPI_HOWTO.txt +++ /dev/null | |||
@@ -1,766 +0,0 @@ | |||
1 | HISTORY: | ||
2 | February 16/2002 -- revision 0.2.1: | ||
3 | COR typo corrected | ||
4 | February 10/2002 -- revision 0.2: | ||
5 | some spell checking ;-> | ||
6 | January 12/2002 -- revision 0.1 | ||
7 | This is still work in progress so may change. | ||
8 | To keep up to date please watch this space. | ||
9 | |||
10 | Introduction to NAPI | ||
11 | ==================== | ||
12 | |||
13 | NAPI is a proven (www.cyberus.ca/~hadi/usenix-paper.tgz) technique | ||
14 | to improve network performance on Linux. For more details please | ||
15 | read that paper. | ||
16 | NAPI provides a "inherent mitigation" which is bound by system capacity | ||
17 | as can be seen from the following data collected by Robert on Gigabit | ||
18 | ethernet (e1000): | ||
19 | |||
20 | Psize Ipps Tput Rxint Txint Done Ndone | ||
21 | --------------------------------------------------------------- | ||
22 | 60 890000 409362 17 27622 7 6823 | ||
23 | 128 758150 464364 21 9301 10 7738 | ||
24 | 256 445632 774646 42 15507 21 12906 | ||
25 | 512 232666 994445 241292 19147 241192 1062 | ||
26 | 1024 119061 1000003 872519 19258 872511 0 | ||
27 | 1440 85193 1000003 946576 19505 946569 0 | ||
28 | |||
29 | |||
30 | Legend: | ||
31 | "Ipps" stands for input packets per second. | ||
32 | "Tput" == packets out of total 1M that made it out. | ||
33 | "txint" == transmit completion interrupts seen | ||
34 | "Done" == The number of times that the poll() managed to pull all | ||
35 | packets out of the rx ring. Note from this that the lower the | ||
36 | load the more we could clean up the rxring | ||
37 | "Ndone" == is the converse of "Done". Note again, that the higher | ||
38 | the load the more times we couldn't clean up the rxring. | ||
39 | |||
40 | Observe that: | ||
41 | when the NIC receives 890K packets/sec, only 17 rx interrupts are generated. | ||
42 | The system can't handle the processing at 1 interrupt/packet at that load level. | ||
43 | At lower rates, on the other hand, rx interrupts go up and therefore the | ||
44 | interrupt/packet ratio goes up (as observable from that table). So there is | ||
45 | a possibility that under low enough input, you get one poll call for each | ||
46 | input packet caused by a single interrupt each time. And if the system | ||
47 | can't handle an interrupt/packet ratio of 1, then it will just have to | ||
48 | chug along .... | ||
49 | |||
50 | |||
51 | 0) Prerequisites: | ||
52 | ================== | ||
53 | A driver MAY continue using the old 2.4 technique for interfacing | ||
54 | to the network stack and not benefit from the NAPI changes. | ||
55 | NAPI additions to the kernel do not break backward compatibility. | ||
56 | NAPI, however, requires the following features to be available: | ||
57 | |||
58 | A) DMA ring or enough RAM to store packets in software devices. | ||
59 | |||
60 | B) Ability to turn off interrupts or maybe events that send packets up | ||
61 | the stack. | ||
62 | |||
63 | NAPI processes packet events in what is known as the dev->poll() method. | ||
64 | Typically, only packet receive events are processed in dev->poll(). | ||
65 | The rest of the events MAY be processed by the regular interrupt handler | ||
66 | to reduce processing latency (justified also because there are not that | ||
67 | many of them). | ||
68 | Note, however, NAPI does not enforce that dev->poll() only processes | ||
69 | receive events. | ||
70 | Tests with the tulip driver indicated slightly increased latency if | ||
71 | all of the interrupt handler is moved to dev->poll(). Also MII handling | ||
72 | gets a little trickier. | ||
73 | The example used in this document is to move the receive processing only | ||
74 | to dev->poll(); this is shown with the patch for the tulip driver. | ||
75 | For an example of code that moves all of the interrupt handling to | ||
76 | dev->poll(), look at the ported e1000 code. | ||
77 | |||
78 | There are caveats that might force you to go with moving everything to | ||
79 | dev->poll(). Different NICs work differently depending on their status/event | ||
80 | acknowledgement setup. | ||
81 | There are two types of event register ACK mechanisms. | ||
82 | I) what is known as Clear-on-read (COR). | ||
83 | When you read the status/event register, it clears everything! | ||
84 | The natsemi and sunbmac NICs are known to do this. | ||
85 | In this case your only choice is to move all to dev->poll() | ||
86 | |||
87 | II) Clear-on-write (COW) | ||
88 | i) you clear the status by writing a 1 in the bit-location you want; | ||
89 | see the sketch after this list. These are the majority of the NICs | ||
90 | and work the best with NAPI. Put only receive events in dev->poll(); | ||
91 | leave the rest in the old interrupt handler. | ||
92 | ii) whatever you write in the status register clears everything ;-> | ||
93 | We can't seem to find any chips supported by Linux which do this. If | ||
94 | someone knows of such a chip, please email us. | ||
95 | Move all to dev->poll() | ||
96 | |||
97 | C) Ability to detect new work correctly. | ||
98 | NAPI works by shutting down event interrupts when there's work and | ||
99 | turning them on when there's none. | ||
100 | New packets might show up in the small window while interrupts are being | ||
101 | re-enabled (refer to appendix 2): a packet might sneak in during the period | ||
102 | we are enabling interrupts. We only get to know about such a packet when the | ||
103 | next new packet arrives and generates an interrupt. | ||
104 | Essentially, there is a small window of opportunity for a race condition | ||
105 | which for clarity we'll refer to as the "rotting packet". | ||
106 | |||
107 | This is a very important topic and appendix 2 is dedicated to further | ||
108 | discussion. | ||
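As a tiny illustration of the type II.i (clear-on-write) acknowledgement
described above, here is a hedged sketch; INT_STATUS, RX_INT and RX_NOBUF
are hypothetical register/bit names, not from any real chip:

--------------------------------------------------------------------
u32 status = readl(ioaddr + INT_STATUS);

/* writing 1s clears the corresponding bits; ack everything except
 * the two rx sources, which NAPI acks later in poll()/refill */
writel(status & ~(RX_INT | RX_NOBUF), ioaddr + INT_STATUS);
--------------------------------------------------------------------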
109 | |||
110 | Locking rules and environmental guarantees | ||
111 | ========================================== | ||
112 | |||
113 | -Guarantee: Only one CPU at any time can call dev->poll(); this is because | ||
114 | only one CPU can pick the initial interrupt and hence the initial | ||
115 | netif_rx_schedule(dev); | ||
116 | - The core layer invokes devices to send packets in a round-robin fashion. | ||
117 | This implies receive is totally lockless because of the guarantee that only | ||
118 | one CPU is executing it. | ||
119 | - contention can only be the result of some other CPU accessing the rx | ||
120 | ring. This happens only in close() and suspend() (when these methods | ||
121 | try to clean the rx ring); | ||
122 | ****guarantee: driver authors need not worry about this; synchronization | ||
123 | is taken care of for them by the top net layer. | ||
124 | -local interrupts are enabled (if you don't move all to dev->poll()). For | ||
125 | example, link/MII and txcomplete continue functioning just the same old way. | ||
126 | This improves the latency of processing these events. It is also assumed that | ||
127 | the receive interrupt is the largest cause of noise. Note this might not | ||
128 | always be true. | ||
129 | [according to Manfred Spraul, the winbond insists on sending one | ||
130 | txmitcomplete interrupt for each packet (although this can be mitigated)]. | ||
131 | For these broken drivers, move all to dev->poll(). | ||
132 | |||
133 | For the rest of this text, we'll assume that dev->poll() only | ||
134 | processes receive events. | ||
135 | |||
136 | New methods introduced by NAPI | ||
137 | ============================= | ||
138 | |||
139 | a) netif_rx_schedule(dev) | ||
140 | Called by an IRQ handler to schedule a poll for the device | ||
141 | |||
142 | b) netif_rx_schedule_prep(dev) | ||
143 | puts the device in a state which allows for it to be added to the | ||
144 | CPU polling list if it is up and running. You can look at this as | ||
145 | the first half of netif_rx_schedule(dev) above; the second half | ||
146 | being c) below. | ||
147 | |||
148 | c) __netif_rx_schedule(dev) | ||
149 | Add device to the poll list for this CPU; assuming that _prep above | ||
150 | has already been called and returned 1. | ||
151 | |||
152 | d) netif_rx_reschedule(dev, undo) | ||
153 | Called to reschedule polling for the device, specifically for | ||
154 | some deficient hardware. Read Appendix 2 for more details. | ||
155 | |||
156 | e) netif_rx_complete(dev) | ||
157 | |||
158 | Remove the interface from the CPU poll list: it must be on the poll list | ||
159 | of the current CPU. This primitive is called by dev->poll() when | ||
160 | it completes its work. The device cannot be off the poll list at this | ||
161 | call; if it is, then clearly it is a BUG(). You'll know ;-> | ||
162 | |||
163 | All of the above methods are used below, so keep reading for clarity. | ||
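As a quick illustration of how a) decomposes into b) and c) per the
descriptions above, here is a minimal sketch (not copied from the kernel
source; the exact inline may differ by version):

--------------------------------------------------------------------
static inline void netif_rx_schedule(struct net_device *dev)
{
	/* first half: mark the device schedulable if it is running */
	if (netif_rx_schedule_prep(dev))
		/* second half: add it to this CPU's poll list */
		__netif_rx_schedule(dev);
}
--------------------------------------------------------------------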
164 | |||
165 | Device driver changes to be made when porting NAPI | ||
166 | ================================================== | ||
167 | |||
168 | Below we describe what kind of changes are required for NAPI to work. | ||
169 | |||
170 | 1) introduction of dev->poll() method | ||
171 | ===================================== | ||
172 | |||
173 | This is the method that is invoked by the network core when it requests | ||
174 | new packets from the driver. A driver is allowed to send up to | ||
175 | dev->quota packets on the current CPU before yielding to the network | ||
176 | subsystem (so other devices also get an opportunity to send to the stack). | ||
177 | |||
178 | dev->poll() prototype looks as follows: | ||
179 | int my_poll(struct net_device *dev, int *budget) | ||
180 | |||
181 | budget is the remaining number of packets the network subsystem on the | ||
182 | current CPU can send up the stack before yielding to other system tasks. | ||
183 | Each driver is responsible for decrementing both *budget and dev->quota | ||
184 | by the total number of packets sent. | ||
185 | The total number of packets sent cannot exceed dev->quota. | ||
186 | |||
187 | The dev->poll() method is invoked by the top layer; the driver simply sends | ||
188 | to the stack, if it can, the packet quantity requested. | ||
189 | |||
190 | More on dev->poll() below, after the interrupt changes are explained. | ||
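To make the accounting contract above concrete, here is a minimal
skeleton; my_poll_rx(), rx_ring_empty() and enable_rx_ints() are
hypothetical stand-ins for driver code, not kernel APIs:

--------------------------------------------------------------------
static int my_poll(struct net_device *dev, int *budget)
{
	/* never send more than the smaller of *budget and dev->quota */
	int received = my_poll_rx(dev, min(*budget, dev->quota));

	/* the driver must decrement both counters itself */
	dev->quota -= received;
	*budget -= received;

	if (rx_ring_empty(dev)) {
		netif_rx_complete(dev);	/* off the poll list */
		enable_rx_ints(dev);
		return 0;		/* done */
	}
	return 1;			/* not done; poll us again */
}
--------------------------------------------------------------------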
191 | |||
192 | 2) registering dev->poll() method | ||
193 | =================================== | ||
194 | |||
195 | dev->poll should be set in the dev->probe() method. | ||
196 | e.g: | ||
197 | dev->open = my_open; | ||
198 | . | ||
199 | . | ||
200 | /* two new additions */ | ||
201 | /* first register my poll method */ | ||
202 | dev->poll = my_poll; | ||
203 | /* next register my weight/quanta; can be overridden in /proc */ | ||
204 | dev->weight = 16; | ||
205 | . | ||
206 | . | ||
207 | dev->stop = my_close; | ||
208 | |||
209 | |||
210 | |||
211 | 3) scheduling dev->poll() | ||
212 | ============================= | ||
213 | This involves modifying the interrupt handler and the code | ||
214 | path which takes packets off the NIC and sends them to the | ||
215 | stack. | ||
216 | |||
217 | It's important at this point to introduce the classical D Becker | ||
218 | interrupt processor: | ||
219 | |||
220 | ------------------ | ||
221 | static irqreturn_t | ||
222 | netdevice_interrupt(int irq, void *dev_instance, struct pt_regs *regs) | ||
223 | { | ||
224 | |||
225 | struct net_device *dev = (struct net_device *)dev_instance; | ||
226 | struct my_private *tp = (struct my_private *)dev->priv; | ||
227 | |||
228 | int work_count = my_work_count; | ||
229 | status = read_interrupt_status_reg(); | ||
230 | if (status == 0) | ||
231 | return IRQ_NONE; /* Shared IRQ: not us */ | ||
232 | if (status == 0xffff) | ||
233 | return IRQ_HANDLED; /* Hot unplug */ | ||
234 | if (status & error) | ||
235 | do_some_error_handling(); | ||
236 | |||
237 | do { | ||
238 | acknowledge_ints_ASAP(); | ||
239 | |||
240 | if (status & link_interrupt) { | ||
241 | spin_lock(&tp->link_lock); | ||
242 | do_some_link_stat_stuff(); | ||
243 | spin_unlock(&tp->link_lock); | ||
244 | } | ||
245 | |||
246 | if (status & rx_interrupt) { | ||
247 | receive_packets(dev); | ||
248 | } | ||
249 | |||
250 | if (status & rx_nobufs) { | ||
251 | make_rx_buffs_avail(); | ||
252 | } | ||
253 | |||
254 | if (status & tx_related) { | ||
255 | spin_lock(&tp->lock); | ||
256 | tx_ring_free(dev); | ||
257 | if (tx_died) | ||
258 | restart_tx(); | ||
259 | spin_unlock(&tp->lock); | ||
260 | } | ||
261 | |||
262 | status = read_interrupt_status_reg(); | ||
263 | |||
264 | } while (!(status & error) || more_work_to_be_done); | ||
265 | return IRQ_HANDLED; | ||
266 | } | ||
267 | |||
268 | ---------------------------------------------------------------------- | ||
269 | |||
270 | We now change this to what is shown below to NAPI-enable it: | ||
271 | |||
272 | ---------------------------------------------------------------------- | ||
273 | static irqreturn_t | ||
274 | netdevice_interrupt(int irq, void *dev_instance, struct pt_regs *regs) | ||
275 | { | ||
276 | struct net_device *dev = (struct net_device *)dev_instance; | ||
277 | struct my_private *tp = (struct my_private *)dev->priv; | ||
278 | |||
279 | status = read_interrupt_status_reg(); | ||
280 | if (status == 0) | ||
281 | return IRQ_NONE; /* Shared IRQ: not us */ | ||
282 | if (status == 0xffff) | ||
283 | return IRQ_HANDLED; /* Hot unplug */ | ||
284 | if (status & error) | ||
285 | do_some_error_handling(); | ||
286 | |||
287 | do { | ||
288 | /************************ start note *********************************/ | ||
289 | acknowledge_ints_ASAP(); /* don't ack rx and rxnobuff here */ | ||
290 | /************************ end note *********************************/ | ||
291 | |||
292 | if (status & link_interrupt) { | ||
293 | spin_lock(&tp->link_lock); | ||
294 | do_some_link_stat_stuff(); | ||
295 | spin_unlock(&tp->link_lock); | ||
296 | } | ||
297 | /************************ start note *********************************/ | ||
298 | if ((status & rx_interrupt) || (status & rx_nobufs)) { | ||
299 | if (netif_rx_schedule_prep(dev)) { | ||
300 | |||
301 | /* disable interrupts caused | ||
302 | * by arriving packets */ | ||
303 | disable_rx_and_rxnobuff_ints(); | ||
304 | /* tell system we have work to be done. */ | ||
305 | __netif_rx_schedule(dev); | ||
306 | } else { | ||
307 | printk("driver bug! interrupt while in poll\n"); | ||
308 | /* FIX by disabling interrupts */ | ||
309 | disable_rx_and_rxnobuff_ints(); | ||
310 | } | ||
311 | } | ||
312 | /************************ end note *********************************/ | ||
313 | |||
314 | if (status & tx_related) { | ||
315 | spin_lock(&tp->lock); | ||
316 | tx_ring_free(dev); | ||
317 | |||
318 | if (tx_died) | ||
319 | restart_tx(); | ||
320 | spin_unlock(&tp->lock); | ||
321 | } | ||
322 | |||
323 | status = read_interrupt_status_reg(); | ||
324 | |||
325 | /************************ start note *********************************/ | ||
326 | } while (!(status & error) || more_work_to_be_done(status)); | ||
327 | /************************ end note *********************************/ | ||
328 | return IRQ_HANDLED; | ||
329 | } | ||
330 | |||
331 | --------------------------------------------------------------------- | ||
332 | |||
333 | |||
334 | We note several things from above: | ||
335 | |||
336 | I) Any interrupt source which is caused by arriving packets is now | ||
337 | turned off when it occurs. Depending on the hardware, there could be | ||
338 | several reasons that arriving packets would cause interrupts; these are the | ||
339 | interrupt sources we wish to avoid. The two common ones are a) a packet | ||
340 | arriving (rxint) b) a packet arriving and finding no DMA buffers available | ||
341 | (rxnobuff). | ||
342 | This also means acknowledge_ints_ASAP() will not clear the status | ||
343 | register for those two items above; clearing is done in the place where | ||
344 | proper work is done within NAPI; at the poll() and refill_rx_ring() | ||
345 | discussed further below. | ||
346 | netif_rx_schedule_prep() returns 1 if the device is in the running state and | ||
347 | gets successfully added to the core poll list. If we get a zero value, | ||
348 | we can _almost_ assume we are already on the list (rather than not running; | ||
349 | the logic being that you shouldn't get an interrupt if the device is not running). | ||
350 | We rectify this by disabling rx and rxnobuff interrupts. | ||
351 | |||
352 | II) receive_packets(dev) and make_rx_buffs_avail() may have disappeared. | ||
353 | These functionalities are actually still around. | ||
354 | |||
355 | In fact, receive_packets(dev) is very close to my_poll() and | ||
356 | make_rx_buffs_avail() is invoked from my_poll(). | ||
357 | |||
358 | 4) converting receive_packets() to dev->poll() | ||
359 | =============================================== | ||
360 | |||
361 | We need to convert the classical D Becker receive_packets(dev) to my_poll() | ||
362 | |||
363 | First the typical receive_packets() below: | ||
364 | ------------------------------------------------------------------- | ||
365 | |||
366 | /* this is called by interrupt handler */ | ||
367 | static void receive_packets (struct net_device *dev) | ||
368 | { | ||
369 | |||
370 | struct my_private *tp = (struct my_private *)dev->priv; | ||
371 | rx_ring = tp->rx_ring; | ||
372 | cur_rx = tp->cur_rx; | ||
373 | int entry = cur_rx % RX_RING_SIZE; | ||
374 | int received = 0; | ||
375 | int rx_work_limit = tp->dirty_rx + RX_RING_SIZE - tp->cur_rx; | ||
376 | |||
377 | while (rx_ring_not_empty) { | ||
378 | u32 rx_status; | ||
379 | unsigned int rx_size; | ||
380 | unsigned int pkt_size; | ||
381 | struct sk_buff *skb; | ||
382 | /* read size+status of next frame from DMA ring buffer */ | ||
383 | /* the numbers 16 and 4 are just examples */ | ||
384 | rx_status = le32_to_cpu (*(u32 *) (rx_ring + ring_offset)); | ||
385 | rx_size = rx_status >> 16; | ||
386 | pkt_size = rx_size - 4; | ||
387 | |||
388 | /* process errors */ | ||
389 | if ((rx_size > (MAX_ETH_FRAME_SIZE+4)) || | ||
390 | (!(rx_status & RxStatusOK))) { | ||
391 | netdrv_rx_err (rx_status, dev, tp, ioaddr); | ||
392 | return; | ||
393 | } | ||
394 | |||
395 | if (--rx_work_limit < 0) | ||
396 | break; | ||
397 | |||
398 | /* grab a skb */ | ||
399 | skb = dev_alloc_skb (pkt_size + 2); | ||
400 | if (skb) { | ||
401 | . | ||
402 | . | ||
403 | netif_rx (skb); | ||
404 | . | ||
405 | . | ||
406 | } else { /* OOM */ | ||
407 | /*seems very driver specific ... some just pass | ||
408 | whatever is on the ring already. */ | ||
409 | } | ||
410 | |||
411 | /* move to the next skb on the ring */ | ||
412 | entry = (++cur_rx) % RX_RING_SIZE; | ||
413 | received++; | ||
414 | |||
415 | } | ||
416 | |||
417 | /* store current ring pointer state */ | ||
418 | tp->cur_rx = cur_rx; | ||
419 | |||
420 | /* Refill the Rx ring buffers if they are needed */ | ||
421 | refill_rx_ring(); | ||
422 | . | ||
423 | . | ||
424 | |||
425 | } | ||
426 | ------------------------------------------------------------------- | ||
427 | We change it to a new one below; note the additional parameter in | ||
428 | the call. | ||
429 | |||
430 | ------------------------------------------------------------------- | ||
431 | |||
432 | /* this is called by the network core */ | ||
433 | static int my_poll (struct net_device *dev, int *budget) | ||
434 | { | ||
435 | |||
436 | struct my_private *tp = (struct my_private *)dev->priv; | ||
437 | rx_ring = tp->rx_ring; | ||
438 | cur_rx = tp->cur_rx; | ||
439 | int entry = cur_rx % RX_RING_SIZE; | ||
440 | /* maximum packets to send to the stack */ | ||
441 | /************************ start note *********************************/ | ||
442 | int rx_work_limit = dev->quota; | ||
443 | |||
444 | /************************ end note *********************************/ | ||
445 | do { // outer beginning loop starts here | ||
446 | |||
447 | clear_rx_status_register_bit(); | ||
448 | |||
449 | while (rx_ring_not_empty) { | ||
450 | u32 rx_status; | ||
451 | unsigned int rx_size; | ||
452 | unsigned int pkt_size; | ||
453 | struct sk_buff *skb; | ||
454 | /* read size+status of next frame from DMA ring buffer */ | ||
455 | /* the numbers 16 and 4 are just examples */ | ||
456 | rx_status = le32_to_cpu (*(u32 *) (rx_ring + ring_offset)); | ||
457 | rx_size = rx_status >> 16; | ||
458 | pkt_size = rx_size - 4; | ||
459 | |||
460 | /* process errors */ | ||
461 | if ((rx_size > (MAX_ETH_FRAME_SIZE+4)) || | ||
462 | (!(rx_status & RxStatusOK))) { | ||
463 | netdrv_rx_err (rx_status, dev, tp, ioaddr); | ||
464 | return 1; | ||
465 | } | ||
466 | |||
467 | /************************ start note *********************************/ | ||
468 | if (--rx_work_limit < 0) { /* we got packets, but no quota */ | ||
469 | /* store current ring pointer state */ | ||
470 | tp->cur_rx = cur_rx; | ||
471 | |||
472 | /* Refill the Rx ring buffers if they are needed */ | ||
473 | refill_rx_ring(dev); | ||
474 | goto not_done; | ||
475 | } | ||
476 | /********************** end note **********************************/ | ||
477 | |||
478 | /* grab a skb */ | ||
479 | skb = dev_alloc_skb (pkt_size + 2); | ||
480 | if (skb) { | ||
481 | . | ||
482 | . | ||
483 | /************************ start note *********************************/ | ||
484 | netif_receive_skb (skb); | ||
485 | /********************** end note **********************************/ | ||
486 | . | ||
487 | . | ||
488 | } else { /* OOM */ | ||
489 | /* seems very driver specific ... common practice is to just pass | ||
490 | whatever is on the ring already. */ | ||
491 | } | ||
492 | |||
493 | /* move to the next skb on the ring */ | ||
494 | entry = (++cur_rx) % RX_RING_SIZE; | ||
495 | received++; | ||
496 | |||
497 | } | ||
498 | |||
499 | /* store current ring pointer state */ | ||
500 | tp->cur_rx = cur_rx; | ||
501 | |||
502 | /* Refill the Rx ring buffers if they are needed */ | ||
503 | refill_rx_ring(dev); | ||
504 | |||
505 | /* no packets on ring; but new ones can arrive since we last | ||
506 | checked */ | ||
507 | status = read_interrupt_status_reg(); | ||
508 | if (rx status is not set) { | ||
509 | /* If something arrives in this narrow window, | ||
510 | an interrupt will be generated */ | ||
511 | goto done; | ||
512 | } | ||
513 | /* done! at least that's what it looks like ;-> | ||
514 | if new packets came in after our last check on status bits, | ||
515 | they'll be caught by the while check and we go back and clear them | ||
516 | since we haven't exceeded our quota */ | ||
517 | } while (rx_status_is_set); | ||
518 | |||
519 | done: | ||
520 | |||
521 | /************************ start note *********************************/ | ||
522 | dev->quota -= received; | ||
523 | *budget -= received; | ||
524 | |||
525 | /* If RX ring is not full we are out of memory. */ | ||
526 | if (tp->rx_buffers[tp->dirty_rx % RX_RING_SIZE].skb == NULL) | ||
527 | goto oom; | ||
528 | |||
529 | /* we are happy/done, no more packets on ring; put us back | ||
530 | to where we can start processing interrupts again */ | ||
531 | netif_rx_complete(dev); | ||
532 | enable_rx_and_rxnobuff_ints(); | ||
533 | |||
534 | /* The last op happens after poll completion. Which means the following: | ||
535 | * 1. it can race with disabling irqs in irq handler (which are done to | ||
536 | * schedule polls) | ||
537 | * 2. it can race with dis/enabling irqs in other poll threads | ||
538 | * 3. if an irq was raised after the beginning of the outer | ||
539 | * loop (marked in the code above), it will be immediately | ||
540 | * triggered here. | ||
541 | * | ||
542 | * Summarizing: the logic may result in some redundant irqs both | ||
543 | * due to races in masking and due to too late acking of already | ||
544 | * processed irqs. The good news: no events are ever lost. | ||
545 | */ | ||
546 | |||
547 | return 0; /* done */ | ||
548 | |||
549 | not_done: | ||
550 | if (tp->cur_rx - tp->dirty_rx > RX_RING_SIZE/2 || | ||
551 | tp->rx_buffers[tp->dirty_rx % RX_RING_SIZE].skb == NULL) | ||
552 | refill_rx_ring(dev); | ||
553 | |||
554 | if (!received) { | ||
555 | printk("received==0\n"); | ||
556 | received = 1; | ||
557 | } | ||
558 | dev->quota -= received; | ||
559 | *budget -= received; | ||
560 | return 1; /* not_done */ | ||
561 | |||
562 | oom: | ||
563 | /* Start timer, stop polling, but do not enable rx interrupts. */ | ||
564 | start_poll_timer(dev); | ||
565 | return 0; /* we'll take it from here so tell core "done"*/ | ||
566 | |||
567 | /************************ end note *********************************/ | ||
568 | } | ||
569 | ------------------------------------------------------------------- | ||
570 | |||
571 | From above we note that: | ||
572 | 0) rx_work_limit = dev->quota | ||
573 | 1) refill_rx_ring() is in charge of clearing the bit for rxnobuff when | ||
574 | it does the work. | ||
575 | 2) We have a done and not_done state. | ||
576 | 3) instead of netif_rx() we call netif_receive_skb() to pass the skb. | ||
577 | 4) we have a new way of handling the oom condition. | ||
578 | 5) A new outer do { } while loop has been added. This serves the purpose of | ||
579 | ensuring that if a new packet comes in after we are all set and done, | ||
580 | and we have not exceeded our quota, we continue sending packets up. | ||
581 | |||
582 | |||
583 | ----------------------------------------------------------- | ||
584 | Poll timer code will need to do the following: | ||
585 | |||
586 | a) | ||
587 | |||
588 | if (tp->cur_rx - tp->dirty_rx > RX_RING_SIZE/2 || | ||
589 | tp->rx_buffers[tp->dirty_rx % RX_RING_SIZE].skb == NULL) | ||
590 | refill_rx_ring(dev); | ||
591 | |||
592 | /* If RX ring is not full we are still out of memory. | ||
593 | Restart the timer again. Else we re-add ourselves | ||
594 | to the master poll list. | ||
595 | */ | ||
596 | |||
597 | if (tp->rx_buffers[tp->dirty_rx % RX_RING_SIZE].skb == NULL) | ||
598 | restart_timer(); | ||
599 | |||
600 | else netif_rx_schedule(dev); /* we are back on the poll list */ | ||
601 | |||
602 | 5) dev->close() and dev->suspend() issues | ||
603 | ========================================== | ||
604 | The driver writer needn't worry about this; the top net layer takes | ||
605 | care of it. | ||
606 | |||
607 | 6) Adding new Stats to /proc | ||
608 | ============================= | ||
609 | In order to debug some of the new features, we introduce new stats | ||
610 | that need to be collected. | ||
611 | TODO: Fill this later. | ||
612 | |||
613 | APPENDIX 1: discussion on using ethernet HW FC | ||
614 | ============================================== | ||
615 | Most chips with FC only send a pause packet when they run out of Rx buffers. | ||
616 | Since packets are pulled off the DMA ring by a softirq in NAPI, | ||
617 | if the system is slow in grabbing them and we have a high input | ||
618 | rate (faster than the system's capacity to remove packets), then theoretically | ||
619 | there will only be one rx interrupt for all packets during a given packet storm. | ||
620 | Under low load, we might have a single interrupt per packet. | ||
621 | FC should be programmed to apply in the case when the system can't pull out | ||
622 | packets fast enough, i.e. send a pause only when you run out of rx buffers. | ||
623 | Note FC in itself is a good solution but we have found it not to be | ||
624 | much of a commodity feature (both in NICs and switches) and hence it falls | ||
625 | under the same category as using NIC-based mitigation. Also, experiments | ||
626 | indicate that it's much harder to resolve the resource allocation | ||
627 | issue (aka the lazy receiving that NAPI offers) and hence quantifying its | ||
628 | usefulness proved harder. In any case, FC works even better with NAPI but is not | ||
629 | necessary. | ||
630 | |||
631 | |||
632 | APPENDIX 2: the "rotting packet" race-window avoidance scheme | ||
633 | ============================================================= | ||
634 | |||
635 | There are two types of associations seen here: | ||
636 | |||
637 | 1) status/int which honors level-triggered IRQs | ||
638 | |||
639 | If a status bit for receive or rxnobuff is set and the corresponding | ||
640 | interrupt-enable bit is not on, then no interrupts will be generated. However, | ||
641 | as soon as the "interrupt-enable" bit is unmasked, an immediate interrupt is | ||
642 | generated [assuming the status bit was not turned off]. | ||
643 | Generally the concept of level-triggered IRQs in association with a status and | ||
644 | interrupt-enable CSR register set is used to avoid the race. | ||
645 | |||
646 | If we take the example of the tulip: | ||
647 | "pending work" is indicated by the status bit(CSR5 in tulip). | ||
648 | the corresponding interrupt bit (CSR7 in tulip) might be turned off (but | ||
649 | the CSR5 will continue to be turned on with new packet arrivals even if | ||
650 | we clear it the first time) | ||
651 | Very important is the fact that if we turn on the interrupt bit on when | ||
652 | status is set that an immediate irq is triggered. | ||
653 | |||
654 | If we cleared the rx ring and proclaimed there was "no more work | ||
655 | to be done" and then went on to do a few other things, then when we enable | ||
656 | interrupts, there is a possibility that a new packet might sneak in during | ||
657 | this phase. It helps to look at the pseudo code for the tulip poll | ||
658 | routine: | ||
659 | |||
660 | -------------------------- | ||
661 | do { | ||
662 | ACK; | ||
663 | while (ring_is_not_empty()) { | ||
664 | work-work-work | ||
665 | if quota is exceeded: exit, not touching irq status/mask | ||
666 | } | ||
667 | /* No packets, but new can arrive while we are doing this*/ | ||
668 | CSR5 := read | ||
669 | if (CSR5 is not set) { | ||
670 | /* If something arrives in this narrow window here, | ||
671 | * where the comments are ;-> irq will be generated */ | ||
672 | unmask irqs; | ||
673 | exit poll; | ||
674 | } | ||
675 | } while (rx_status_is_set); | ||
676 | ------------------------ | ||
677 | |||
678 | The only CSR5 bit of interest is the rx status. | ||
679 | If you look at the last if statement: | ||
680 | you just finished grabbing all the packets from the rx ring .. you check if | ||
681 | the status bit says there are more packets just in ... it says none; you then | ||
682 | enable rx interrupts again; if a new packet just came in during this check, | ||
683 | we are counting on CSR5 being set in that small window of opportunity, | ||
684 | so that by re-enabling interrupts, we would actually trigger an interrupt | ||
685 | to register the new packet for processing. | ||
686 | |||
687 | [The above description may be very verbose; if you have better wording | ||
688 | that will make this more understandable, please suggest it.] | ||
689 | |||
690 | 2) non-capable hardware | ||
691 | |||
692 | These do not generally respect level-triggered IRQs. Normally, | ||
693 | irqs may be lost while being masked and the only way to leave poll is to do | ||
694 | a double check for new input after netif_rx_complete() is invoked | ||
695 | and re-enable polling (after seeing this new input). | ||
696 | |||
697 | Sample code: | ||
698 | |||
699 | --------- | ||
700 | . | ||
701 | . | ||
702 | restart_poll: | ||
703 | while (ring_is_not_empty()) { | ||
704 | work-work-work | ||
705 | if quota is exceeded: exit, not touching irq status/mask | ||
706 | } | ||
707 | . | ||
708 | . | ||
709 | . | ||
710 | enable_rx_interrupts() | ||
711 | netif_rx_complete(dev); | ||
712 | if (ring_has_new_packet() && netif_rx_reschedule(dev, received)) { | ||
713 | disable_rx_and_rxnobuff_ints(); | ||
714 | goto restart_poll; | ||
715 | } | ||
716 | --------- | ||
717 | |||
718 | Basically, netif_rx_complete() removes us from the poll list; but because a | ||
719 | new packet might come in during the race window and would otherwise never | ||
720 | be caught, we check again and attempt to re-add ourselves to the poll list. | ||
721 | |||
722 | |||
723 | |||
724 | |||
725 | APPENDIX 3: Scheduling issues. | ||
726 | ============================== | ||
727 | As seen, NAPI moves processing to softirq level. Linux uses ksoftirqd as the | ||
728 | general solution to schedule softirqs to run before the next interrupt, by putting | ||
729 | them under scheduler control. This also prevents consecutive softirqs from | ||
730 | monopolizing the CPU, and has the effect that the priority of ksoftirqd needs | ||
731 | to be considered when running very CPU-intensive applications and networking, to | ||
732 | get the proper softirq/user balance. Increasing ksoftirqd priority to 0 | ||
733 | (or eventually more) is reported to cure problems with low network performance at high | ||
734 | CPU load. | ||
735 | |||
736 | Most-used processes in a GigE router: | ||
737 | USER PID %CPU %MEM SIZE RSS TTY STAT START TIME COMMAND | ||
738 | root 3 0.2 0.0 0 0 ? RWN Aug 15 602:00 (ksoftirqd_CPU0) | ||
739 | root 232 0.0 7.9 41400 40884 ? S Aug 15 74:12 gated | ||
740 | |||
741 | -------------------------------------------------------------------- | ||
742 | |||
743 | relevant sites: | ||
744 | ================== | ||
745 | ftp://robur.slu.se/pub/Linux/net-development/NAPI/ | ||
746 | |||
747 | |||
748 | -------------------------------------------------------------------- | ||
749 | TODO: Write net-skeleton.c driver. | ||
750 | ------------------------------------------------------------- | ||
751 | |||
752 | Authors: | ||
753 | ======== | ||
754 | Alexey Kuznetsov <kuznet@ms2.inr.ac.ru> | ||
755 | Jamal Hadi Salim <hadi@cyberus.ca> | ||
756 | Robert Olsson <Robert.Olsson@data.slu.se> | ||
757 | |||
758 | Acknowledgements: | ||
759 | ================ | ||
760 | People who made this document better: | ||
761 | |||
762 | Lennert Buytenhek <buytenh@gnu.org> | ||
763 | Andrew Morton <akpm@zip.com.au> | ||
764 | Manfred Spraul <manfred@colorfullife.com> | ||
765 | Donald Becker <becker@scyld.com> | ||
766 | Jeff Garzik <jgarzik@pobox.com> | ||
diff --git a/Documentation/networking/netdevices.txt b/Documentation/networking/netdevices.txt index 37869295fc70..9f7be9b7785e 100644 --- a/Documentation/networking/netdevices.txt +++ b/Documentation/networking/netdevices.txt | |||
@@ -95,9 +95,13 @@ dev->set_multicast_list: | |||
95 | Synchronization: netif_tx_lock spinlock. | 95 | Synchronization: netif_tx_lock spinlock. |
96 | Context: BHs disabled | 96 | Context: BHs disabled |
97 | 97 | ||
98 | dev->poll: | 98 | struct napi_struct synchronization rules |
99 | Synchronization: __LINK_STATE_RX_SCHED bit in dev->state. See | 99 | ======================================== |
100 | dev_close code and comments in net/core/dev.c for more info. | 100 | napi->poll: |
101 | Synchronization: NAPI_STATE_SCHED bit in napi->state. Device | ||
102 | driver's dev->close method will invoke napi_disable() on | ||
103 | all NAPI instances which will do a sleeping poll on the | ||
104 | NAPI_STATE_SCHED napi->state bit, waiting for all pending | ||
105 | NAPI activity to cease. | ||
101 | Context: softirq | 106 | Context: softirq |
102 | will be called with interrupts disabled by netconsole. | 107 | will be called with interrupts disabled by netconsole. |
103 | |||
diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h index 285c143115cc..35f3ca42bd60 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib.h +++ b/drivers/infiniband/ulp/ipoib/ipoib.h | |||
@@ -228,6 +228,8 @@ struct ipoib_dev_priv { | |||
228 | 228 | ||
229 | struct net_device *dev; | 229 | struct net_device *dev; |
230 | 230 | ||
231 | struct napi_struct napi; | ||
232 | |||
231 | unsigned long flags; | 233 | unsigned long flags; |
232 | 234 | ||
233 | struct mutex mcast_mutex; | 235 | struct mutex mcast_mutex; |
@@ -351,7 +353,7 @@ extern struct workqueue_struct *ipoib_workqueue; | |||
351 | 353 | ||
352 | /* functions */ | 354 | /* functions */ |
353 | 355 | ||
354 | int ipoib_poll(struct net_device *dev, int *budget); | 356 | int ipoib_poll(struct napi_struct *napi, int budget); |
355 | void ipoib_ib_completion(struct ib_cq *cq, void *dev_ptr); | 357 | void ipoib_ib_completion(struct ib_cq *cq, void *dev_ptr); |
356 | 358 | ||
357 | struct ipoib_ah *ipoib_create_ah(struct net_device *dev, | 359 | struct ipoib_ah *ipoib_create_ah(struct net_device *dev, |
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c index 10944888cffd..481e4b6bd949 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c | |||
@@ -281,63 +281,58 @@ static void ipoib_ib_handle_tx_wc(struct net_device *dev, struct ib_wc *wc) | |||
281 | wc->status, wr_id, wc->vendor_err); | 281 | wc->status, wr_id, wc->vendor_err); |
282 | } | 282 | } |
283 | 283 | ||
284 | int ipoib_poll(struct net_device *dev, int *budget) | 284 | int ipoib_poll(struct napi_struct *napi, int budget) |
285 | { | 285 | { |
286 | struct ipoib_dev_priv *priv = netdev_priv(dev); | 286 | struct ipoib_dev_priv *priv = container_of(napi, struct ipoib_dev_priv, napi); |
287 | int max = min(*budget, dev->quota); | 287 | struct net_device *dev = priv->dev; |
288 | int done; | 288 | int done; |
289 | int t; | 289 | int t; |
290 | int empty; | ||
291 | int n, i; | 290 | int n, i; |
292 | 291 | ||
293 | done = 0; | 292 | done = 0; |
294 | empty = 0; | ||
295 | 293 | ||
296 | while (max) { | 294 | poll_more: |
295 | while (done < budget) { | ||
296 | int max = (budget - done); | ||
297 | |||
297 | t = min(IPOIB_NUM_WC, max); | 298 | t = min(IPOIB_NUM_WC, max); |
298 | n = ib_poll_cq(priv->cq, t, priv->ibwc); | 299 | n = ib_poll_cq(priv->cq, t, priv->ibwc); |
299 | 300 | ||
300 | for (i = 0; i < n; ++i) { | 301 | for (i = 0; i < n; i++) { |
301 | struct ib_wc *wc = priv->ibwc + i; | 302 | struct ib_wc *wc = priv->ibwc + i; |
302 | 303 | ||
303 | if (wc->wr_id & IPOIB_CM_OP_SRQ) { | 304 | if (wc->wr_id & IPOIB_CM_OP_SRQ) { |
304 | ++done; | 305 | ++done; |
305 | --max; | ||
306 | ipoib_cm_handle_rx_wc(dev, wc); | 306 | ipoib_cm_handle_rx_wc(dev, wc); |
307 | } else if (wc->wr_id & IPOIB_OP_RECV) { | 307 | } else if (wc->wr_id & IPOIB_OP_RECV) { |
308 | ++done; | 308 | ++done; |
309 | --max; | ||
310 | ipoib_ib_handle_rx_wc(dev, wc); | 309 | ipoib_ib_handle_rx_wc(dev, wc); |
311 | } else | 310 | } else |
312 | ipoib_ib_handle_tx_wc(dev, wc); | 311 | ipoib_ib_handle_tx_wc(dev, wc); |
313 | } | 312 | } |
314 | 313 | ||
315 | if (n != t) { | 314 | if (n != t) |
316 | empty = 1; | ||
317 | break; | 315 | break; |
318 | } | ||
319 | } | 316 | } |
320 | 317 | ||
321 | dev->quota -= done; | 318 | if (done < budget) { |
322 | *budget -= done; | 319 | netif_rx_complete(dev, napi); |
323 | |||
324 | if (empty) { | ||
325 | netif_rx_complete(dev); | ||
326 | if (unlikely(ib_req_notify_cq(priv->cq, | 320 | if (unlikely(ib_req_notify_cq(priv->cq, |
327 | IB_CQ_NEXT_COMP | | 321 | IB_CQ_NEXT_COMP | |
328 | IB_CQ_REPORT_MISSED_EVENTS)) && | 322 | IB_CQ_REPORT_MISSED_EVENTS)) && |
329 | netif_rx_reschedule(dev, 0)) | 323 | netif_rx_reschedule(dev, napi)) |
330 | return 1; | 324 | goto poll_more; |
331 | |||
332 | return 0; | ||
333 | } | 325 | } |
334 | 326 | ||
335 | return 1; | 327 | return done; |
336 | } | 328 | } |
337 | 329 | ||
338 | void ipoib_ib_completion(struct ib_cq *cq, void *dev_ptr) | 330 | void ipoib_ib_completion(struct ib_cq *cq, void *dev_ptr) |
339 | { | 331 | { |
340 | netif_rx_schedule(dev_ptr); | 332 | struct net_device *dev = dev_ptr; |
333 | struct ipoib_dev_priv *priv = netdev_priv(dev); | ||
334 | |||
335 | netif_rx_schedule(dev, &priv->napi); | ||
341 | } | 336 | } |
342 | 337 | ||
343 | static inline int post_send(struct ipoib_dev_priv *priv, | 338 | static inline int post_send(struct ipoib_dev_priv *priv, |
@@ -577,7 +572,6 @@ int ipoib_ib_dev_stop(struct net_device *dev, int flush) | |||
577 | int i; | 572 | int i; |
578 | 573 | ||
579 | clear_bit(IPOIB_FLAG_INITIALIZED, &priv->flags); | 574 | clear_bit(IPOIB_FLAG_INITIALIZED, &priv->flags); |
580 | netif_poll_disable(dev); | ||
581 | 575 | ||
582 | ipoib_cm_dev_stop(dev); | 576 | ipoib_cm_dev_stop(dev); |
583 | 577 | ||
@@ -660,7 +654,6 @@ timeout: | |||
660 | msleep(1); | 654 | msleep(1); |
661 | } | 655 | } |
662 | 656 | ||
663 | netif_poll_enable(dev); | ||
664 | ib_req_notify_cq(priv->cq, IB_CQ_NEXT_COMP); | 657 | ib_req_notify_cq(priv->cq, IB_CQ_NEXT_COMP); |
665 | 658 | ||
666 | return 0; | 659 | return 0; |
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c index 894b1dcdf3eb..a59ff07ec3cd 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_main.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c | |||
@@ -98,16 +98,20 @@ int ipoib_open(struct net_device *dev) | |||
98 | 98 | ||
99 | ipoib_dbg(priv, "bringing up interface\n"); | 99 | ipoib_dbg(priv, "bringing up interface\n"); |
100 | 100 | ||
101 | napi_enable(&priv->napi); | ||
101 | set_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags); | 102 | set_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags); |
102 | 103 | ||
103 | if (ipoib_pkey_dev_delay_open(dev)) | 104 | if (ipoib_pkey_dev_delay_open(dev)) |
104 | return 0; | 105 | return 0; |
105 | 106 | ||
106 | if (ipoib_ib_dev_open(dev)) | 107 | if (ipoib_ib_dev_open(dev)) { |
108 | napi_disable(&priv->napi); | ||
107 | return -EINVAL; | 109 | return -EINVAL; |
110 | } | ||
108 | 111 | ||
109 | if (ipoib_ib_dev_up(dev)) { | 112 | if (ipoib_ib_dev_up(dev)) { |
110 | ipoib_ib_dev_stop(dev, 1); | 113 | ipoib_ib_dev_stop(dev, 1); |
114 | napi_disable(&priv->napi); | ||
111 | return -EINVAL; | 115 | return -EINVAL; |
112 | } | 116 | } |
113 | 117 | ||
@@ -140,6 +144,7 @@ static int ipoib_stop(struct net_device *dev) | |||
140 | ipoib_dbg(priv, "stopping interface\n"); | 144 | ipoib_dbg(priv, "stopping interface\n"); |
141 | 145 | ||
142 | clear_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags); | 146 | clear_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags); |
147 | napi_disable(&priv->napi); | ||
143 | 148 | ||
144 | netif_stop_queue(dev); | 149 | netif_stop_queue(dev); |
145 | 150 | ||
@@ -948,8 +953,8 @@ static void ipoib_setup(struct net_device *dev) | |||
948 | dev->hard_header = ipoib_hard_header; | 953 | dev->hard_header = ipoib_hard_header; |
949 | dev->set_multicast_list = ipoib_set_mcast_list; | 954 | dev->set_multicast_list = ipoib_set_mcast_list; |
950 | dev->neigh_setup = ipoib_neigh_setup_dev; | 955 | dev->neigh_setup = ipoib_neigh_setup_dev; |
951 | dev->poll = ipoib_poll; | 956 | |
952 | dev->weight = 100; | 957 | netif_napi_add(dev, &priv->napi, ipoib_poll, 100); |
953 | 958 | ||
954 | dev->watchdog_timeo = HZ; | 959 | dev->watchdog_timeo = HZ; |
955 | 960 | ||
diff --git a/drivers/net/8139cp.c b/drivers/net/8139cp.c index a79f28c7a100..7f18ca23d9f8 100644 --- a/drivers/net/8139cp.c +++ b/drivers/net/8139cp.c | |||
@@ -334,6 +334,8 @@ struct cp_private { | |||
334 | spinlock_t lock; | 334 | spinlock_t lock; |
335 | u32 msg_enable; | 335 | u32 msg_enable; |
336 | 336 | ||
337 | struct napi_struct napi; | ||
338 | |||
337 | struct pci_dev *pdev; | 339 | struct pci_dev *pdev; |
338 | u32 rx_config; | 340 | u32 rx_config; |
339 | u16 cpcmd; | 341 | u16 cpcmd; |
@@ -501,12 +503,12 @@ static inline unsigned int cp_rx_csum_ok (u32 status) | |||
501 | return 0; | 503 | return 0; |
502 | } | 504 | } |
503 | 505 | ||
504 | static int cp_rx_poll (struct net_device *dev, int *budget) | 506 | static int cp_rx_poll(struct napi_struct *napi, int budget) |
505 | { | 507 | { |
506 | struct cp_private *cp = netdev_priv(dev); | 508 | struct cp_private *cp = container_of(napi, struct cp_private, napi); |
507 | unsigned rx_tail = cp->rx_tail; | 509 | struct net_device *dev = cp->dev; |
508 | unsigned rx_work = dev->quota; | 510 | unsigned int rx_tail = cp->rx_tail; |
509 | unsigned rx; | 511 | int rx; |
510 | 512 | ||
511 | rx_status_loop: | 513 | rx_status_loop: |
512 | rx = 0; | 514 | rx = 0; |
@@ -588,33 +590,28 @@ rx_next: | |||
588 | desc->opts1 = cpu_to_le32(DescOwn | cp->rx_buf_sz); | 590 | desc->opts1 = cpu_to_le32(DescOwn | cp->rx_buf_sz); |
589 | rx_tail = NEXT_RX(rx_tail); | 591 | rx_tail = NEXT_RX(rx_tail); |
590 | 592 | ||
591 | if (!rx_work--) | 593 | if (rx >= budget) |
592 | break; | 594 | break; |
593 | } | 595 | } |
594 | 596 | ||
595 | cp->rx_tail = rx_tail; | 597 | cp->rx_tail = rx_tail; |
596 | 598 | ||
597 | dev->quota -= rx; | ||
598 | *budget -= rx; | ||
599 | |||
600 | /* if we did not reach work limit, then we're done with | 599 | /* if we did not reach work limit, then we're done with |
601 | * this round of polling | 600 | * this round of polling |
602 | */ | 601 | */ |
603 | if (rx_work) { | 602 | if (rx < budget) { |
604 | unsigned long flags; | 603 | unsigned long flags; |
605 | 604 | ||
606 | if (cpr16(IntrStatus) & cp_rx_intr_mask) | 605 | if (cpr16(IntrStatus) & cp_rx_intr_mask) |
607 | goto rx_status_loop; | 606 | goto rx_status_loop; |
608 | 607 | ||
609 | local_irq_save(flags); | 608 | spin_lock_irqsave(&cp->lock, flags); |
610 | cpw16_f(IntrMask, cp_intr_mask); | 609 | cpw16_f(IntrMask, cp_intr_mask); |
611 | __netif_rx_complete(dev); | 610 | __netif_rx_complete(dev, napi); |
612 | local_irq_restore(flags); | 611 | spin_unlock_irqrestore(&cp->lock, flags); |
613 | |||
614 | return 0; /* done */ | ||
615 | } | 612 | } |
616 | 613 | ||
617 | return 1; /* not done */ | 614 | return rx; |
618 | } | 615 | } |
619 | 616 | ||
620 | static irqreturn_t cp_interrupt (int irq, void *dev_instance) | 617 | static irqreturn_t cp_interrupt (int irq, void *dev_instance) |
@@ -647,9 +644,9 @@ static irqreturn_t cp_interrupt (int irq, void *dev_instance) | |||
647 | } | 644 | } |
648 | 645 | ||
649 | if (status & (RxOK | RxErr | RxEmpty | RxFIFOOvr)) | 646 | if (status & (RxOK | RxErr | RxEmpty | RxFIFOOvr)) |
650 | if (netif_rx_schedule_prep(dev)) { | 647 | if (netif_rx_schedule_prep(dev, &cp->napi)) { |
651 | cpw16_f(IntrMask, cp_norx_intr_mask); | 648 | cpw16_f(IntrMask, cp_norx_intr_mask); |
652 | __netif_rx_schedule(dev); | 649 | __netif_rx_schedule(dev, &cp->napi); |
653 | } | 650 | } |
654 | 651 | ||
655 | if (status & (TxOK | TxErr | TxEmpty | SWInt)) | 652 | if (status & (TxOK | TxErr | TxEmpty | SWInt)) |
@@ -1175,6 +1172,8 @@ static int cp_open (struct net_device *dev) | |||
1175 | if (rc) | 1172 | if (rc) |
1176 | return rc; | 1173 | return rc; |
1177 | 1174 | ||
1175 | napi_enable(&cp->napi); | ||
1176 | |||
1178 | cp_init_hw(cp); | 1177 | cp_init_hw(cp); |
1179 | 1178 | ||
1180 | rc = request_irq(dev->irq, cp_interrupt, IRQF_SHARED, dev->name, dev); | 1179 | rc = request_irq(dev->irq, cp_interrupt, IRQF_SHARED, dev->name, dev); |
@@ -1188,6 +1187,7 @@ static int cp_open (struct net_device *dev) | |||
1188 | return 0; | 1187 | return 0; |
1189 | 1188 | ||
1190 | err_out_hw: | 1189 | err_out_hw: |
1190 | napi_disable(&cp->napi); | ||
1191 | cp_stop_hw(cp); | 1191 | cp_stop_hw(cp); |
1192 | cp_free_rings(cp); | 1192 | cp_free_rings(cp); |
1193 | return rc; | 1193 | return rc; |
@@ -1198,6 +1198,8 @@ static int cp_close (struct net_device *dev) | |||
1198 | struct cp_private *cp = netdev_priv(dev); | 1198 | struct cp_private *cp = netdev_priv(dev); |
1199 | unsigned long flags; | 1199 | unsigned long flags; |
1200 | 1200 | ||
1201 | napi_disable(&cp->napi); | ||
1202 | |||
1201 | if (netif_msg_ifdown(cp)) | 1203 | if (netif_msg_ifdown(cp)) |
1202 | printk(KERN_DEBUG "%s: disabling interface\n", dev->name); | 1204 | printk(KERN_DEBUG "%s: disabling interface\n", dev->name); |
1203 | 1205 | ||
@@ -1933,11 +1935,10 @@ static int cp_init_one (struct pci_dev *pdev, const struct pci_device_id *ent) | |||
1933 | dev->hard_start_xmit = cp_start_xmit; | 1935 | dev->hard_start_xmit = cp_start_xmit; |
1934 | dev->get_stats = cp_get_stats; | 1936 | dev->get_stats = cp_get_stats; |
1935 | dev->do_ioctl = cp_ioctl; | 1937 | dev->do_ioctl = cp_ioctl; |
1936 | dev->poll = cp_rx_poll; | ||
1937 | #ifdef CONFIG_NET_POLL_CONTROLLER | 1938 | #ifdef CONFIG_NET_POLL_CONTROLLER |
1938 | dev->poll_controller = cp_poll_controller; | 1939 | dev->poll_controller = cp_poll_controller; |
1939 | #endif | 1940 | #endif |
1940 | dev->weight = 16; /* arbitrary? from NAPI_HOWTO.txt. */ | 1941 | netif_napi_add(dev, &cp->napi, cp_rx_poll, 16); |
1941 | #ifdef BROKEN | 1942 | #ifdef BROKEN |
1942 | dev->change_mtu = cp_change_mtu; | 1943 | dev->change_mtu = cp_change_mtu; |
1943 | #endif | 1944 | #endif |
diff --git a/drivers/net/8139too.c b/drivers/net/8139too.c index f4e4298d24b9..20af6baecfcb 100644 --- a/drivers/net/8139too.c +++ b/drivers/net/8139too.c | |||
@@ -573,6 +573,8 @@ struct rtl8139_private { | |||
573 | int drv_flags; | 573 | int drv_flags; |
574 | struct pci_dev *pci_dev; | 574 | struct pci_dev *pci_dev; |
575 | u32 msg_enable; | 575 | u32 msg_enable; |
576 | struct napi_struct napi; | ||
577 | struct net_device *dev; | ||
576 | struct net_device_stats stats; | 578 | struct net_device_stats stats; |
577 | unsigned char *rx_ring; | 579 | unsigned char *rx_ring; |
578 | unsigned int cur_rx; /* Index into the Rx buffer of next Rx pkt. */ | 580 | unsigned int cur_rx; /* Index into the Rx buffer of next Rx pkt. */ |
@@ -625,10 +627,10 @@ static void rtl8139_tx_timeout (struct net_device *dev); | |||
625 | static void rtl8139_init_ring (struct net_device *dev); | 627 | static void rtl8139_init_ring (struct net_device *dev); |
626 | static int rtl8139_start_xmit (struct sk_buff *skb, | 628 | static int rtl8139_start_xmit (struct sk_buff *skb, |
627 | struct net_device *dev); | 629 | struct net_device *dev); |
628 | static int rtl8139_poll(struct net_device *dev, int *budget); | ||
629 | #ifdef CONFIG_NET_POLL_CONTROLLER | 630 | #ifdef CONFIG_NET_POLL_CONTROLLER |
630 | static void rtl8139_poll_controller(struct net_device *dev); | 631 | static void rtl8139_poll_controller(struct net_device *dev); |
631 | #endif | 632 | #endif |
633 | static int rtl8139_poll(struct napi_struct *napi, int budget); | ||
632 | static irqreturn_t rtl8139_interrupt (int irq, void *dev_instance); | 634 | static irqreturn_t rtl8139_interrupt (int irq, void *dev_instance); |
633 | static int rtl8139_close (struct net_device *dev); | 635 | static int rtl8139_close (struct net_device *dev); |
634 | static int netdev_ioctl (struct net_device *dev, struct ifreq *rq, int cmd); | 636 | static int netdev_ioctl (struct net_device *dev, struct ifreq *rq, int cmd); |
@@ -963,6 +965,7 @@ static int __devinit rtl8139_init_one (struct pci_dev *pdev, | |||
963 | 965 | ||
964 | assert (dev != NULL); | 966 | assert (dev != NULL); |
965 | tp = netdev_priv(dev); | 967 | tp = netdev_priv(dev); |
968 | tp->dev = dev; | ||
966 | 969 | ||
967 | ioaddr = tp->mmio_addr; | 970 | ioaddr = tp->mmio_addr; |
968 | assert (ioaddr != NULL); | 971 | assert (ioaddr != NULL); |
@@ -976,8 +979,7 @@ static int __devinit rtl8139_init_one (struct pci_dev *pdev, | |||
976 | /* The Rtl8139-specific entries in the device structure. */ | 979 | /* The Rtl8139-specific entries in the device structure. */ |
977 | dev->open = rtl8139_open; | 980 | dev->open = rtl8139_open; |
978 | dev->hard_start_xmit = rtl8139_start_xmit; | 981 | dev->hard_start_xmit = rtl8139_start_xmit; |
979 | dev->poll = rtl8139_poll; | 982 | netif_napi_add(dev, &tp->napi, rtl8139_poll, 64); |
980 | dev->weight = 64; | ||
981 | dev->stop = rtl8139_close; | 983 | dev->stop = rtl8139_close; |
982 | dev->get_stats = rtl8139_get_stats; | 984 | dev->get_stats = rtl8139_get_stats; |
983 | dev->set_multicast_list = rtl8139_set_rx_mode; | 985 | dev->set_multicast_list = rtl8139_set_rx_mode; |
@@ -1332,6 +1334,8 @@ static int rtl8139_open (struct net_device *dev) | |||
1332 | 1334 | ||
1333 | } | 1335 | } |
1334 | 1336 | ||
1337 | napi_enable(&tp->napi); | ||
1338 | |||
1335 | tp->mii.full_duplex = tp->mii.force_media; | 1339 | tp->mii.full_duplex = tp->mii.force_media; |
1336 | tp->tx_flag = (TX_FIFO_THRESH << 11) & 0x003f0000; | 1340 | tp->tx_flag = (TX_FIFO_THRESH << 11) & 0x003f0000; |
1337 | 1341 | ||
@@ -2103,39 +2107,32 @@ static void rtl8139_weird_interrupt (struct net_device *dev, | |||
2103 | } | 2107 | } |
2104 | } | 2108 | } |
2105 | 2109 | ||
2106 | static int rtl8139_poll(struct net_device *dev, int *budget) | 2110 | static int rtl8139_poll(struct napi_struct *napi, int budget) |
2107 | { | 2111 | { |
2108 | struct rtl8139_private *tp = netdev_priv(dev); | 2112 | struct rtl8139_private *tp = container_of(napi, struct rtl8139_private, napi); |
2113 | struct net_device *dev = tp->dev; | ||
2109 | void __iomem *ioaddr = tp->mmio_addr; | 2114 | void __iomem *ioaddr = tp->mmio_addr; |
2110 | int orig_budget = min(*budget, dev->quota); | 2115 | int work_done; |
2111 | int done = 1; | ||
2112 | 2116 | ||
2113 | spin_lock(&tp->rx_lock); | 2117 | spin_lock(&tp->rx_lock); |
2114 | if (likely(RTL_R16(IntrStatus) & RxAckBits)) { | 2118 | work_done = 0; |
2115 | int work_done; | 2119 | if (likely(RTL_R16(IntrStatus) & RxAckBits)) |
2116 | 2120 | work_done += rtl8139_rx(dev, tp, budget); | |
2117 | work_done = rtl8139_rx(dev, tp, orig_budget); | ||
2118 | if (likely(work_done > 0)) { | ||
2119 | *budget -= work_done; | ||
2120 | dev->quota -= work_done; | ||
2121 | done = (work_done < orig_budget); | ||
2122 | } | ||
2123 | } | ||
2124 | 2121 | ||
2125 | if (done) { | 2122 | if (work_done < budget) { |
2126 | unsigned long flags; | 2123 | unsigned long flags; |
2127 | /* | 2124 | /* |
2128 | * Order is important since data can get interrupted | 2125 | * Order is important since data can get interrupted |
2129 | * again when we think we are done. | 2126 | * again when we think we are done. |
2130 | */ | 2127 | */ |
2131 | local_irq_save(flags); | 2128 | spin_lock_irqsave(&tp->lock, flags); |
2132 | RTL_W16_F(IntrMask, rtl8139_intr_mask); | 2129 | RTL_W16_F(IntrMask, rtl8139_intr_mask); |
2133 | __netif_rx_complete(dev); | 2130 | __netif_rx_complete(dev, napi); |
2134 | local_irq_restore(flags); | 2131 | spin_unlock_irqrestore(&tp->lock, flags); |
2135 | } | 2132 | } |
2136 | spin_unlock(&tp->rx_lock); | 2133 | spin_unlock(&tp->rx_lock); |
2137 | 2134 | ||
2138 | return !done; | 2135 | return work_done; |
2139 | } | 2136 | } |
2140 | 2137 | ||
2141 | /* The interrupt handler does all of the Rx thread work and cleans up | 2138 | /* The interrupt handler does all of the Rx thread work and cleans up |
@@ -2180,9 +2177,9 @@ static irqreturn_t rtl8139_interrupt (int irq, void *dev_instance) | |||
2180 | /* Receive packets are processed by poll routine. | 2177 | /* Receive packets are processed by poll routine. |
2181 | If not running start it now. */ | 2178 | If not running start it now. */ |
2182 | if (status & RxAckBits){ | 2179 | if (status & RxAckBits){ |
2183 | if (netif_rx_schedule_prep(dev)) { | 2180 | if (netif_rx_schedule_prep(dev, &tp->napi)) { |
2184 | RTL_W16_F (IntrMask, rtl8139_norx_intr_mask); | 2181 | RTL_W16_F (IntrMask, rtl8139_norx_intr_mask); |
2185 | __netif_rx_schedule (dev); | 2182 | __netif_rx_schedule(dev, &tp->napi); |
2186 | } | 2183 | } |
2187 | } | 2184 | } |
2188 | 2185 | ||
@@ -2223,7 +2220,8 @@ static int rtl8139_close (struct net_device *dev) | |||
2223 | void __iomem *ioaddr = tp->mmio_addr; | 2220 | void __iomem *ioaddr = tp->mmio_addr; |
2224 | unsigned long flags; | 2221 | unsigned long flags; |
2225 | 2222 | ||
2226 | netif_stop_queue (dev); | 2223 | netif_stop_queue(dev); |
2224 | napi_disable(&tp->napi); | ||
2227 | 2225 | ||
2228 | if (netif_msg_ifdown(tp)) | 2226 | if (netif_msg_ifdown(tp)) |
2229 | printk(KERN_DEBUG "%s: Shutting down ethercard, status was 0x%4.4x.\n", | 2227 | printk(KERN_DEBUG "%s: Shutting down ethercard, status was 0x%4.4x.\n", |
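The rtl8139 hunks above show the shape that every conversion in this series follows: the poll callback now takes the napi_struct and the budget directly, recovers its private state with container_of(), and returns the number of packets processed instead of a done/not-done flag. A condensed sketch of that pattern for a hypothetical driver "foo" (foo_rx() and foo_enable_rx_irq() are placeholders, not functions from this diff):

        #include <linux/netdevice.h>

        struct foo_priv {
                struct napi_struct napi;
                struct net_device *dev; /* back pointer, set at probe time */
                /* ... rings, registers, locks ... */
        };

        static int foo_poll(struct napi_struct *napi, int budget)
        {
                struct foo_priv *fp = container_of(napi, struct foo_priv, napi);
                struct net_device *dev = fp->dev;
                int work_done;

                /* Process at most 'budget' packets; the old *budget and
                 * dev->quota bookkeeping is gone, the core accounts for
                 * work from the return value. */
                work_done = foo_rx(dev, fp, budget);

                if (work_done < budget) {
                        /* Ring drained: leave polled mode, then let the
                         * device interrupt again. */
                        netif_rx_complete(dev, napi);
                        foo_enable_rx_irq(fp);
                }
                return work_done;
        }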
diff --git a/drivers/net/amd8111e.c b/drivers/net/amd8111e.c index a61b2f89fc33..cf06fc067e92 100644 --- a/drivers/net/amd8111e.c +++ b/drivers/net/amd8111e.c | |||
@@ -723,9 +723,10 @@ static int amd8111e_tx(struct net_device *dev) | |||
723 | 723 | ||
724 | #ifdef CONFIG_AMD8111E_NAPI | 724 | #ifdef CONFIG_AMD8111E_NAPI |
725 | /* This function handles the driver receive operation in polling mode */ | 725 | /* This function handles the driver receive operation in polling mode */ |
726 | static int amd8111e_rx_poll(struct net_device *dev, int * budget) | 726 | static int amd8111e_rx_poll(struct napi_struct *napi, int budget) |
727 | { | 727 | { |
728 | struct amd8111e_priv *lp = netdev_priv(dev); | 728 | struct amd8111e_priv *lp = container_of(napi, struct amd8111e_priv, napi); |
729 | struct net_device *dev = lp->amd8111e_net_dev; | ||
729 | int rx_index = lp->rx_idx & RX_RING_DR_MOD_MASK; | 730 | int rx_index = lp->rx_idx & RX_RING_DR_MOD_MASK; |
730 | void __iomem *mmio = lp->mmio; | 731 | void __iomem *mmio = lp->mmio; |
731 | struct sk_buff *skb,*new_skb; | 732 | struct sk_buff *skb,*new_skb; |
@@ -737,7 +738,7 @@ static int amd8111e_rx_poll(struct net_device *dev, int * budget) | |||
737 | #if AMD8111E_VLAN_TAG_USED | 738 | #if AMD8111E_VLAN_TAG_USED |
738 | short vtag; | 739 | short vtag; |
739 | #endif | 740 | #endif |
740 | int rx_pkt_limit = dev->quota; | 741 | int rx_pkt_limit = budget; |
741 | unsigned long flags; | 742 | unsigned long flags; |
742 | 743 | ||
743 | do{ | 744 | do{ |
@@ -838,21 +839,14 @@ static int amd8111e_rx_poll(struct net_device *dev, int * budget) | |||
838 | } while(intr0 & RINT0); | 839 | } while(intr0 & RINT0); |
839 | 840 | ||
840 | /* Receive descriptor is empty now */ | 841 | /* Receive descriptor is empty now */ |
841 | dev->quota -= num_rx_pkt; | ||
842 | *budget -= num_rx_pkt; | ||
843 | |||
844 | spin_lock_irqsave(&lp->lock, flags); | 842 | spin_lock_irqsave(&lp->lock, flags); |
845 | netif_rx_complete(dev); | 843 | __netif_rx_complete(dev, napi); |
846 | writel(VAL0|RINTEN0, mmio + INTEN0); | 844 | writel(VAL0|RINTEN0, mmio + INTEN0); |
847 | writel(VAL2 | RDMD0, mmio + CMD0); | 845 | writel(VAL2 | RDMD0, mmio + CMD0); |
848 | spin_unlock_irqrestore(&lp->lock, flags); | 846 | spin_unlock_irqrestore(&lp->lock, flags); |
849 | return 0; | ||
850 | 847 | ||
851 | rx_not_empty: | 848 | rx_not_empty: |
852 | /* Do not call a netif_rx_complete */ | 849 | return num_rx_pkt; |
853 | dev->quota -= num_rx_pkt; | ||
854 | *budget -= num_rx_pkt; | ||
855 | return 1; | ||
856 | } | 850 | } |
857 | 851 | ||
858 | #else | 852 | #else |
@@ -1287,11 +1281,11 @@ static irqreturn_t amd8111e_interrupt(int irq, void *dev_id) | |||
1287 | /* Check if Receive Interrupt has occurred. */ | 1281 | /* Check if Receive Interrupt has occurred. */ |
1288 | #ifdef CONFIG_AMD8111E_NAPI | 1282 | #ifdef CONFIG_AMD8111E_NAPI |
1289 | if(intr0 & RINT0){ | 1283 | if(intr0 & RINT0){ |
1290 | if(netif_rx_schedule_prep(dev)){ | 1284 | if(netif_rx_schedule_prep(dev, &lp->napi)){ |
1291 | /* Disable receive interrupts */ | 1285 | /* Disable receive interrupts */ |
1292 | writel(RINTEN0, mmio + INTEN0); | 1286 | writel(RINTEN0, mmio + INTEN0); |
1293 | /* Schedule a polling routine */ | 1287 | /* Schedule a polling routine */ |
1294 | __netif_rx_schedule(dev); | 1288 | __netif_rx_schedule(dev, &lp->napi); |
1295 | } | 1289 | } |
1296 | else if (intren0 & RINTEN0) { | 1290 | else if (intren0 & RINTEN0) { |
1297 | printk("************Driver bug! \ | 1291 | printk("************Driver bug! \ |
@@ -1345,6 +1339,8 @@ static int amd8111e_close(struct net_device * dev) | |||
1345 | struct amd8111e_priv *lp = netdev_priv(dev); | 1339 | struct amd8111e_priv *lp = netdev_priv(dev); |
1346 | netif_stop_queue(dev); | 1340 | netif_stop_queue(dev); |
1347 | 1341 | ||
1342 | napi_disable(&lp->napi); | ||
1343 | |||
1348 | spin_lock_irq(&lp->lock); | 1344 | spin_lock_irq(&lp->lock); |
1349 | 1345 | ||
1350 | amd8111e_disable_interrupt(lp); | 1346 | amd8111e_disable_interrupt(lp); |
@@ -1375,12 +1371,15 @@ static int amd8111e_open(struct net_device * dev ) | |||
1375 | dev->name, dev)) | 1371 | dev->name, dev)) |
1376 | return -EAGAIN; | 1372 | return -EAGAIN; |
1377 | 1373 | ||
1374 | napi_enable(&lp->napi); | ||
1375 | |||
1378 | spin_lock_irq(&lp->lock); | 1376 | spin_lock_irq(&lp->lock); |
1379 | 1377 | ||
1380 | amd8111e_init_hw_default(lp); | 1378 | amd8111e_init_hw_default(lp); |
1381 | 1379 | ||
1382 | if(amd8111e_restart(dev)){ | 1380 | if(amd8111e_restart(dev)){ |
1383 | spin_unlock_irq(&lp->lock); | 1381 | spin_unlock_irq(&lp->lock); |
1382 | napi_disable(&lp->napi); | ||
1384 | if (dev->irq) | 1383 | if (dev->irq) |
1385 | free_irq(dev->irq, dev); | 1384 | free_irq(dev->irq, dev); |
1386 | return -ENOMEM; | 1385 | return -ENOMEM; |
@@ -2031,8 +2030,7 @@ static int __devinit amd8111e_probe_one(struct pci_dev *pdev, | |||
2031 | dev->tx_timeout = amd8111e_tx_timeout; | 2030 | dev->tx_timeout = amd8111e_tx_timeout; |
2032 | dev->watchdog_timeo = AMD8111E_TX_TIMEOUT; | 2031 | dev->watchdog_timeo = AMD8111E_TX_TIMEOUT; |
2033 | #ifdef CONFIG_AMD8111E_NAPI | 2032 | #ifdef CONFIG_AMD8111E_NAPI |
2034 | dev->poll = amd8111e_rx_poll; | 2033 | netif_napi_add(dev, &lp->napi, amd8111e_rx_poll, 32); |
2035 | dev->weight = 32; | ||
2036 | #endif | 2034 | #endif |
2037 | #ifdef CONFIG_NET_POLL_CONTROLLER | 2035 | #ifdef CONFIG_NET_POLL_CONTROLLER |
2038 | dev->poll_controller = amd8111e_poll; | 2036 | dev->poll_controller = amd8111e_poll; |
diff --git a/drivers/net/amd8111e.h b/drivers/net/amd8111e.h index e65080a5994a..612e653610e1 100644 --- a/drivers/net/amd8111e.h +++ b/drivers/net/amd8111e.h | |||
@@ -763,6 +763,8 @@ struct amd8111e_priv{ | |||
763 | /* Reg memory mapped address */ | 763 | /* Reg memory mapped address */ |
764 | void __iomem *mmio; | 764 | void __iomem *mmio; |
765 | 765 | ||
766 | struct napi_struct napi; | ||
767 | |||
766 | spinlock_t lock; /* Guard lock */ | 768 | spinlock_t lock; /* Guard lock */ |
767 | unsigned long rx_idx, tx_idx; /* The next free ring entry */ | 769 | unsigned long rx_idx, tx_idx; /* The next free ring entry */ |
768 | unsigned long tx_complete_idx; | 770 | unsigned long tx_complete_idx; |
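The interrupt-handler half of the conversion, visible in the amd8111e hunk above, is equally mechanical: the schedule-prep/schedule pair simply gains the napi_struct argument. In sketch form (same hypothetical "foo" driver as above; foo_rx_pending() and the irq mask helpers are placeholders):

        static irqreturn_t foo_interrupt(int irq, void *dev_id)
        {
                struct net_device *dev = dev_id;
                struct foo_priv *fp = netdev_priv(dev);

                if (foo_rx_pending(fp)) {
                        /* Claim the poll; fails harmlessly if one is
                         * already scheduled. */
                        if (netif_rx_schedule_prep(dev, &fp->napi)) {
                                foo_disable_rx_irq(fp); /* poll runs with rx irq masked */
                                __netif_rx_schedule(dev, &fp->napi);
                        }
                }
                return IRQ_HANDLED;
        }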
diff --git a/drivers/net/arm/ep93xx_eth.c b/drivers/net/arm/ep93xx_eth.c index f6ece1d43f6e..7f016f3d5bf0 100644 --- a/drivers/net/arm/ep93xx_eth.c +++ b/drivers/net/arm/ep93xx_eth.c | |||
@@ -169,6 +169,9 @@ struct ep93xx_priv | |||
169 | spinlock_t tx_pending_lock; | 169 | spinlock_t tx_pending_lock; |
170 | unsigned int tx_pending; | 170 | unsigned int tx_pending; |
171 | 171 | ||
172 | struct net_device *dev; | ||
173 | struct napi_struct napi; | ||
174 | |||
172 | struct net_device_stats stats; | 175 | struct net_device_stats stats; |
173 | 176 | ||
174 | struct mii_if_info mii; | 177 | struct mii_if_info mii; |
@@ -190,15 +193,11 @@ static struct net_device_stats *ep93xx_get_stats(struct net_device *dev) | |||
190 | return &(ep->stats); | 193 | return &(ep->stats); |
191 | } | 194 | } |
192 | 195 | ||
193 | static int ep93xx_rx(struct net_device *dev, int *budget) | 196 | static int ep93xx_rx(struct net_device *dev, int processed, int budget) |
194 | { | 197 | { |
195 | struct ep93xx_priv *ep = netdev_priv(dev); | 198 | struct ep93xx_priv *ep = netdev_priv(dev); |
196 | int rx_done; | ||
197 | int processed; | ||
198 | 199 | ||
199 | rx_done = 0; | 200 | while (processed < budget) { |
200 | processed = 0; | ||
201 | while (*budget > 0) { | ||
202 | int entry; | 201 | int entry; |
203 | struct ep93xx_rstat *rstat; | 202 | struct ep93xx_rstat *rstat; |
204 | u32 rstat0; | 203 | u32 rstat0; |
@@ -211,10 +210,8 @@ static int ep93xx_rx(struct net_device *dev, int *budget) | |||
211 | 210 | ||
212 | rstat0 = rstat->rstat0; | 211 | rstat0 = rstat->rstat0; |
213 | rstat1 = rstat->rstat1; | 212 | rstat1 = rstat->rstat1; |
214 | if (!(rstat0 & RSTAT0_RFP) || !(rstat1 & RSTAT1_RFP)) { | 213 | if (!(rstat0 & RSTAT0_RFP) || !(rstat1 & RSTAT1_RFP)) |
215 | rx_done = 1; | ||
216 | break; | 214 | break; |
217 | } | ||
218 | 215 | ||
219 | rstat->rstat0 = 0; | 216 | rstat->rstat0 = 0; |
220 | rstat->rstat1 = 0; | 217 | rstat->rstat1 = 0; |
@@ -275,8 +272,6 @@ static int ep93xx_rx(struct net_device *dev, int *budget) | |||
275 | err: | 272 | err: |
276 | ep->rx_pointer = (entry + 1) & (RX_QUEUE_ENTRIES - 1); | 273 | ep->rx_pointer = (entry + 1) & (RX_QUEUE_ENTRIES - 1); |
277 | processed++; | 274 | processed++; |
278 | dev->quota--; | ||
279 | (*budget)--; | ||
280 | } | 275 | } |
281 | 276 | ||
282 | if (processed) { | 277 | if (processed) { |
@@ -284,7 +279,7 @@ err: | |||
284 | wrw(ep, REG_RXSTSENQ, processed); | 279 | wrw(ep, REG_RXSTSENQ, processed); |
285 | } | 280 | } |
286 | 281 | ||
287 | return !rx_done; | 282 | return processed; |
288 | } | 283 | } |
289 | 284 | ||
290 | static int ep93xx_have_more_rx(struct ep93xx_priv *ep) | 285 | static int ep93xx_have_more_rx(struct ep93xx_priv *ep) |
@@ -293,36 +288,32 @@ static int ep93xx_have_more_rx(struct ep93xx_priv *ep) | |||
293 | return !!((rstat->rstat0 & RSTAT0_RFP) && (rstat->rstat1 & RSTAT1_RFP)); | 288 | return !!((rstat->rstat0 & RSTAT0_RFP) && (rstat->rstat1 & RSTAT1_RFP)); |
294 | } | 289 | } |
295 | 290 | ||
296 | static int ep93xx_poll(struct net_device *dev, int *budget) | 291 | static int ep93xx_poll(struct napi_struct *napi, int budget) |
297 | { | 292 | { |
298 | struct ep93xx_priv *ep = netdev_priv(dev); | 293 | struct ep93xx_priv *ep = container_of(napi, struct ep93xx_priv, napi); |
299 | 294 | struct net_device *dev = ep->dev; | |
300 | /* | 295 | int rx = 0; |
301 | * @@@ Have to stop polling if device is downed while we | ||
302 | * are polling. | ||
303 | */ | ||
304 | 296 | ||
305 | poll_some_more: | 297 | poll_some_more: |
306 | if (ep93xx_rx(dev, budget)) | 298 | rx = ep93xx_rx(dev, rx, budget); |
307 | return 1; | 299 | if (rx < budget) { |
308 | 300 | int more = 0; | |
309 | netif_rx_complete(dev); | 301 | |
310 | 302 | spin_lock_irq(&ep->rx_lock); | |
311 | spin_lock_irq(&ep->rx_lock); | 303 | __netif_rx_complete(dev, napi); |
312 | wrl(ep, REG_INTEN, REG_INTEN_TX | REG_INTEN_RX); | 304 | wrl(ep, REG_INTEN, REG_INTEN_TX | REG_INTEN_RX); |
313 | if (ep93xx_have_more_rx(ep)) { | 305 | if (ep93xx_have_more_rx(ep)) { |
314 | wrl(ep, REG_INTEN, REG_INTEN_TX); | 306 | wrl(ep, REG_INTEN, REG_INTEN_TX); |
315 | wrl(ep, REG_INTSTSP, REG_INTSTS_RX); | 307 | wrl(ep, REG_INTSTSP, REG_INTSTS_RX); |
308 | more = 1; | ||
309 | } | ||
316 | spin_unlock_irq(&ep->rx_lock); | 310 | spin_unlock_irq(&ep->rx_lock); |
317 | 311 | ||
318 | if (netif_rx_reschedule(dev, 0)) | 312 | if (more && netif_rx_reschedule(dev, napi)) |
319 | goto poll_some_more; | 313 | goto poll_some_more; |
320 | |||
321 | return 0; | ||
322 | } | 314 | } |
323 | spin_unlock_irq(&ep->rx_lock); | ||
324 | 315 | ||
325 | return 0; | 316 | return rx; |
326 | } | 317 | } |
327 | 318 | ||
328 | static int ep93xx_xmit(struct sk_buff *skb, struct net_device *dev) | 319 | static int ep93xx_xmit(struct sk_buff *skb, struct net_device *dev) |
@@ -426,9 +417,9 @@ static irqreturn_t ep93xx_irq(int irq, void *dev_id) | |||
426 | 417 | ||
427 | if (status & REG_INTSTS_RX) { | 418 | if (status & REG_INTSTS_RX) { |
428 | spin_lock(&ep->rx_lock); | 419 | spin_lock(&ep->rx_lock); |
429 | if (likely(__netif_rx_schedule_prep(dev))) { | 420 | if (likely(__netif_rx_schedule_prep(dev, &ep->napi))) { |
430 | wrl(ep, REG_INTEN, REG_INTEN_TX); | 421 | wrl(ep, REG_INTEN, REG_INTEN_TX); |
431 | __netif_rx_schedule(dev); | 422 | __netif_rx_schedule(dev, &ep->napi); |
432 | } | 423 | } |
433 | spin_unlock(&ep->rx_lock); | 424 | spin_unlock(&ep->rx_lock); |
434 | } | 425 | } |
@@ -648,7 +639,10 @@ static int ep93xx_open(struct net_device *dev) | |||
648 | dev->dev_addr[4], dev->dev_addr[5]); | 639 | dev->dev_addr[4], dev->dev_addr[5]); |
649 | } | 640 | } |
650 | 641 | ||
642 | napi_enable(&ep->napi); | ||
643 | |||
651 | if (ep93xx_start_hw(dev)) { | 644 | if (ep93xx_start_hw(dev)) { |
645 | napi_disable(&ep->napi); | ||
652 | ep93xx_free_buffers(ep); | 646 | ep93xx_free_buffers(ep); |
653 | return -EIO; | 647 | return -EIO; |
654 | } | 648 | } |
@@ -662,6 +656,7 @@ static int ep93xx_open(struct net_device *dev) | |||
662 | 656 | ||
663 | err = request_irq(ep->irq, ep93xx_irq, IRQF_SHARED, dev->name, dev); | 657 | err = request_irq(ep->irq, ep93xx_irq, IRQF_SHARED, dev->name, dev); |
664 | if (err) { | 658 | if (err) { |
659 | napi_disable(&ep->napi); | ||
665 | ep93xx_stop_hw(dev); | 660 | ep93xx_stop_hw(dev); |
666 | ep93xx_free_buffers(ep); | 661 | ep93xx_free_buffers(ep); |
667 | return err; | 662 | return err; |
@@ -678,6 +673,7 @@ static int ep93xx_close(struct net_device *dev) | |||
678 | { | 673 | { |
679 | struct ep93xx_priv *ep = netdev_priv(dev); | 674 | struct ep93xx_priv *ep = netdev_priv(dev); |
680 | 675 | ||
676 | napi_disable(&ep->napi); | ||
681 | netif_stop_queue(dev); | 677 | netif_stop_queue(dev); |
682 | 678 | ||
683 | wrl(ep, REG_GIINTMSK, 0); | 679 | wrl(ep, REG_GIINTMSK, 0); |
@@ -788,14 +784,12 @@ struct net_device *ep93xx_dev_alloc(struct ep93xx_eth_data *data) | |||
788 | 784 | ||
789 | dev->get_stats = ep93xx_get_stats; | 785 | dev->get_stats = ep93xx_get_stats; |
790 | dev->ethtool_ops = &ep93xx_ethtool_ops; | 786 | dev->ethtool_ops = &ep93xx_ethtool_ops; |
791 | dev->poll = ep93xx_poll; | ||
792 | dev->hard_start_xmit = ep93xx_xmit; | 787 | dev->hard_start_xmit = ep93xx_xmit; |
793 | dev->open = ep93xx_open; | 788 | dev->open = ep93xx_open; |
794 | dev->stop = ep93xx_close; | 789 | dev->stop = ep93xx_close; |
795 | dev->do_ioctl = ep93xx_ioctl; | 790 | dev->do_ioctl = ep93xx_ioctl; |
796 | 791 | ||
797 | dev->features |= NETIF_F_SG | NETIF_F_HW_CSUM; | 792 | dev->features |= NETIF_F_SG | NETIF_F_HW_CSUM; |
798 | dev->weight = 64; | ||
799 | 793 | ||
800 | return dev; | 794 | return dev; |
801 | } | 795 | } |
@@ -847,6 +841,8 @@ static int ep93xx_eth_probe(struct platform_device *pdev) | |||
847 | goto err_out; | 841 | goto err_out; |
848 | } | 842 | } |
849 | ep = netdev_priv(dev); | 843 | ep = netdev_priv(dev); |
844 | ep->dev = dev; | ||
845 | netif_napi_add(dev, &ep->napi, ep93xx_poll, 64); | ||
850 | 846 | ||
851 | platform_set_drvdata(pdev, dev); | 847 | platform_set_drvdata(pdev, dev); |
852 | 848 | ||
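The ep93xx poll routine closes the race every NAPI driver faces: between completing the poll and the register write that unmasks the rx interrupt, a packet can arrive and its interrupt be lost. The idiom, preserved in the new code above, is to re-check the ring and take the poll back with netif_rx_reschedule() if anything slipped in. Stripped down (hypothetical helpers again; note the cumulative rx count, matching the new ep93xx_rx() signature):

        static int foo_poll(struct napi_struct *napi, int budget)
        {
                struct foo_priv *fp = container_of(napi, struct foo_priv, napi);
                struct net_device *dev = fp->dev;
                int rx = 0;

        again:
                rx = foo_rx(dev, rx, budget);   /* returns total processed so far */
                if (rx < budget) {
                        netif_rx_complete(dev, napi);
                        foo_enable_rx_irq(fp);
                        /* A packet may have landed before the unmask took
                         * effect; if so, reclaim the poll and loop. */
                        if (foo_rx_pending(fp) && netif_rx_reschedule(dev, napi)) {
                                foo_disable_rx_irq(fp);
                                goto again;
                        }
                }
                return rx;
        }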
diff --git a/drivers/net/b44.c b/drivers/net/b44.c index 0795df235492..b92b3e25c42a 100644 --- a/drivers/net/b44.c +++ b/drivers/net/b44.c | |||
@@ -848,10 +848,11 @@ static int b44_rx(struct b44 *bp, int budget) | |||
848 | return received; | 848 | return received; |
849 | } | 849 | } |
850 | 850 | ||
851 | static int b44_poll(struct net_device *netdev, int *budget) | 851 | static int b44_poll(struct napi_struct *napi, int budget) |
852 | { | 852 | { |
853 | struct b44 *bp = netdev_priv(netdev); | 853 | struct b44 *bp = container_of(napi, struct b44, napi); |
854 | int done; | 854 | struct net_device *netdev = bp->dev; |
855 | int work_done; | ||
855 | 856 | ||
856 | spin_lock_irq(&bp->lock); | 857 | spin_lock_irq(&bp->lock); |
857 | 858 | ||
@@ -862,22 +863,9 @@ static int b44_poll(struct net_device *netdev, int *budget) | |||
862 | } | 863 | } |
863 | spin_unlock_irq(&bp->lock); | 864 | spin_unlock_irq(&bp->lock); |
864 | 865 | ||
865 | done = 1; | 866 | work_done = 0; |
866 | if (bp->istat & ISTAT_RX) { | 867 | if (bp->istat & ISTAT_RX) |
867 | int orig_budget = *budget; | 868 | work_done += b44_rx(bp, budget); |
868 | int work_done; | ||
869 | |||
870 | if (orig_budget > netdev->quota) | ||
871 | orig_budget = netdev->quota; | ||
872 | |||
873 | work_done = b44_rx(bp, orig_budget); | ||
874 | |||
875 | *budget -= work_done; | ||
876 | netdev->quota -= work_done; | ||
877 | |||
878 | if (work_done >= orig_budget) | ||
879 | done = 0; | ||
880 | } | ||
881 | 869 | ||
882 | if (bp->istat & ISTAT_ERRORS) { | 870 | if (bp->istat & ISTAT_ERRORS) { |
883 | unsigned long flags; | 871 | unsigned long flags; |
@@ -888,15 +876,15 @@ static int b44_poll(struct net_device *netdev, int *budget) | |||
888 | b44_init_hw(bp, B44_FULL_RESET_SKIP_PHY); | 876 | b44_init_hw(bp, B44_FULL_RESET_SKIP_PHY); |
889 | netif_wake_queue(bp->dev); | 877 | netif_wake_queue(bp->dev); |
890 | spin_unlock_irqrestore(&bp->lock, flags); | 878 | spin_unlock_irqrestore(&bp->lock, flags); |
891 | done = 1; | 879 | work_done = 0; |
892 | } | 880 | } |
893 | 881 | ||
894 | if (done) { | 882 | if (work_done < budget) { |
895 | netif_rx_complete(netdev); | 883 | netif_rx_complete(netdev, napi); |
896 | b44_enable_ints(bp); | 884 | b44_enable_ints(bp); |
897 | } | 885 | } |
898 | 886 | ||
899 | return (done ? 0 : 1); | 887 | return work_done; |
900 | } | 888 | } |
901 | 889 | ||
902 | static irqreturn_t b44_interrupt(int irq, void *dev_id) | 890 | static irqreturn_t b44_interrupt(int irq, void *dev_id) |
@@ -924,13 +912,13 @@ static irqreturn_t b44_interrupt(int irq, void *dev_id) | |||
924 | goto irq_ack; | 912 | goto irq_ack; |
925 | } | 913 | } |
926 | 914 | ||
927 | if (netif_rx_schedule_prep(dev)) { | 915 | if (netif_rx_schedule_prep(dev, &bp->napi)) { |
928 | /* NOTE: These writes are posted by the readback of | 916 | /* NOTE: These writes are posted by the readback of |
929 | * the ISTAT register below. | 917 | * the ISTAT register below. |
930 | */ | 918 | */ |
931 | bp->istat = istat; | 919 | bp->istat = istat; |
932 | __b44_disable_ints(bp); | 920 | __b44_disable_ints(bp); |
933 | __netif_rx_schedule(dev); | 921 | __netif_rx_schedule(dev, &bp->napi); |
934 | } else { | 922 | } else { |
935 | printk(KERN_ERR PFX "%s: Error, poll already scheduled\n", | 923 | printk(KERN_ERR PFX "%s: Error, poll already scheduled\n", |
936 | dev->name); | 924 | dev->name); |
@@ -1420,6 +1408,8 @@ static int b44_open(struct net_device *dev) | |||
1420 | if (err) | 1408 | if (err) |
1421 | goto out; | 1409 | goto out; |
1422 | 1410 | ||
1411 | napi_enable(&bp->napi); | ||
1412 | |||
1423 | b44_init_rings(bp); | 1413 | b44_init_rings(bp); |
1424 | b44_init_hw(bp, B44_FULL_RESET); | 1414 | b44_init_hw(bp, B44_FULL_RESET); |
1425 | 1415 | ||
@@ -1427,6 +1417,7 @@ static int b44_open(struct net_device *dev) | |||
1427 | 1417 | ||
1428 | err = request_irq(dev->irq, b44_interrupt, IRQF_SHARED, dev->name, dev); | 1418 | err = request_irq(dev->irq, b44_interrupt, IRQF_SHARED, dev->name, dev); |
1429 | if (unlikely(err < 0)) { | 1419 | if (unlikely(err < 0)) { |
1420 | napi_disable(&bp->napi); | ||
1430 | b44_chip_reset(bp); | 1421 | b44_chip_reset(bp); |
1431 | b44_free_rings(bp); | 1422 | b44_free_rings(bp); |
1432 | b44_free_consistent(bp); | 1423 | b44_free_consistent(bp); |
@@ -1609,7 +1600,7 @@ static int b44_close(struct net_device *dev) | |||
1609 | 1600 | ||
1610 | netif_stop_queue(dev); | 1601 | netif_stop_queue(dev); |
1611 | 1602 | ||
1612 | netif_poll_disable(dev); | 1603 | napi_disable(&bp->napi); |
1613 | 1604 | ||
1614 | del_timer_sync(&bp->timer); | 1605 | del_timer_sync(&bp->timer); |
1615 | 1606 | ||
@@ -1626,8 +1617,6 @@ static int b44_close(struct net_device *dev) | |||
1626 | 1617 | ||
1627 | free_irq(dev->irq, dev); | 1618 | free_irq(dev->irq, dev); |
1628 | 1619 | ||
1629 | netif_poll_enable(dev); | ||
1630 | |||
1631 | if (bp->flags & B44_FLAG_WOL_ENABLE) { | 1620 | if (bp->flags & B44_FLAG_WOL_ENABLE) { |
1632 | b44_init_hw(bp, B44_PARTIAL_RESET); | 1621 | b44_init_hw(bp, B44_PARTIAL_RESET); |
1633 | b44_setup_wol(bp); | 1622 | b44_setup_wol(bp); |
@@ -2194,8 +2183,7 @@ static int __devinit b44_init_one(struct pci_dev *pdev, | |||
2194 | dev->set_mac_address = b44_set_mac_addr; | 2183 | dev->set_mac_address = b44_set_mac_addr; |
2195 | dev->do_ioctl = b44_ioctl; | 2184 | dev->do_ioctl = b44_ioctl; |
2196 | dev->tx_timeout = b44_tx_timeout; | 2185 | dev->tx_timeout = b44_tx_timeout; |
2197 | dev->poll = b44_poll; | 2186 | netif_napi_add(dev, &bp->napi, b44_poll, 64); |
2198 | dev->weight = 64; | ||
2199 | dev->watchdog_timeo = B44_TX_TIMEOUT; | 2187 | dev->watchdog_timeo = B44_TX_TIMEOUT; |
2200 | #ifdef CONFIG_NET_POLL_CONTROLLER | 2188 | #ifdef CONFIG_NET_POLL_CONTROLLER |
2201 | dev->poll_controller = b44_poll_controller; | 2189 | dev->poll_controller = b44_poll_controller; |
diff --git a/drivers/net/b44.h b/drivers/net/b44.h index e537e63f292e..63c55a4ab3cd 100644 --- a/drivers/net/b44.h +++ b/drivers/net/b44.h | |||
@@ -423,6 +423,8 @@ struct b44 { | |||
423 | struct ring_info *rx_buffers; | 423 | struct ring_info *rx_buffers; |
424 | struct ring_info *tx_buffers; | 424 | struct ring_info *tx_buffers; |
425 | 425 | ||
426 | struct napi_struct napi; | ||
427 | |||
426 | u32 dma_offset; | 428 | u32 dma_offset; |
427 | u32 flags; | 429 | u32 flags; |
428 | #define B44_FLAG_B0_ANDLATER 0x00000001 | 430 | #define B44_FLAG_B0_ANDLATER 0x00000001 |
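The open/close changes in b44 are the lifecycle rule in miniature: napi_enable() must run before the hardware can raise rx interrupts, napi_disable() must run (and wait out any in-flight poll) before rx resources are torn down, and every error path out of open has to undo the enable. Roughly (sketch, hypothetical helpers):

        static int foo_open(struct net_device *dev)
        {
                struct foo_priv *fp = netdev_priv(dev);
                int err;

                napi_enable(&fp->napi);         /* before interrupts can fire */

                err = request_irq(dev->irq, foo_interrupt, IRQF_SHARED,
                                  dev->name, dev);
                if (err) {
                        napi_disable(&fp->napi);        /* undo on every failure path */
                        return err;
                }
                foo_start_hw(fp);
                netif_start_queue(dev);
                return 0;
        }

        static int foo_close(struct net_device *dev)
        {
                struct foo_priv *fp = netdev_priv(dev);

                netif_stop_queue(dev);
                napi_disable(&fp->napi);        /* blocks until a running poll finishes */
                foo_stop_hw(fp);
                free_irq(dev->irq, dev);
                return 0;
        }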
diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c index 66eed22cbd21..ab028ad04235 100644 --- a/drivers/net/bnx2.c +++ b/drivers/net/bnx2.c | |||
@@ -428,7 +428,7 @@ bnx2_netif_stop(struct bnx2 *bp) | |||
428 | { | 428 | { |
429 | bnx2_disable_int_sync(bp); | 429 | bnx2_disable_int_sync(bp); |
430 | if (netif_running(bp->dev)) { | 430 | if (netif_running(bp->dev)) { |
431 | netif_poll_disable(bp->dev); | 431 | napi_disable(&bp->napi); |
432 | netif_tx_disable(bp->dev); | 432 | netif_tx_disable(bp->dev); |
433 | bp->dev->trans_start = jiffies; /* prevent tx timeout */ | 433 | bp->dev->trans_start = jiffies; /* prevent tx timeout */ |
434 | } | 434 | } |
@@ -440,7 +440,7 @@ bnx2_netif_start(struct bnx2 *bp) | |||
440 | if (atomic_dec_and_test(&bp->intr_sem)) { | 440 | if (atomic_dec_and_test(&bp->intr_sem)) { |
441 | if (netif_running(bp->dev)) { | 441 | if (netif_running(bp->dev)) { |
442 | netif_wake_queue(bp->dev); | 442 | netif_wake_queue(bp->dev); |
443 | netif_poll_enable(bp->dev); | 443 | napi_enable(&bp->napi); |
444 | bnx2_enable_int(bp); | 444 | bnx2_enable_int(bp); |
445 | } | 445 | } |
446 | } | 446 | } |
@@ -2551,7 +2551,7 @@ bnx2_msi(int irq, void *dev_instance) | |||
2551 | if (unlikely(atomic_read(&bp->intr_sem) != 0)) | 2551 | if (unlikely(atomic_read(&bp->intr_sem) != 0)) |
2552 | return IRQ_HANDLED; | 2552 | return IRQ_HANDLED; |
2553 | 2553 | ||
2554 | netif_rx_schedule(dev); | 2554 | netif_rx_schedule(dev, &bp->napi); |
2555 | 2555 | ||
2556 | return IRQ_HANDLED; | 2556 | return IRQ_HANDLED; |
2557 | } | 2557 | } |
@@ -2568,7 +2568,7 @@ bnx2_msi_1shot(int irq, void *dev_instance) | |||
2568 | if (unlikely(atomic_read(&bp->intr_sem) != 0)) | 2568 | if (unlikely(atomic_read(&bp->intr_sem) != 0)) |
2569 | return IRQ_HANDLED; | 2569 | return IRQ_HANDLED; |
2570 | 2570 | ||
2571 | netif_rx_schedule(dev); | 2571 | netif_rx_schedule(dev, &bp->napi); |
2572 | 2572 | ||
2573 | return IRQ_HANDLED; | 2573 | return IRQ_HANDLED; |
2574 | } | 2574 | } |
@@ -2604,9 +2604,9 @@ bnx2_interrupt(int irq, void *dev_instance) | |||
2604 | if (unlikely(atomic_read(&bp->intr_sem) != 0)) | 2604 | if (unlikely(atomic_read(&bp->intr_sem) != 0)) |
2605 | return IRQ_HANDLED; | 2605 | return IRQ_HANDLED; |
2606 | 2606 | ||
2607 | if (netif_rx_schedule_prep(dev)) { | 2607 | if (netif_rx_schedule_prep(dev, &bp->napi)) { |
2608 | bp->last_status_idx = sblk->status_idx; | 2608 | bp->last_status_idx = sblk->status_idx; |
2609 | __netif_rx_schedule(dev); | 2609 | __netif_rx_schedule(dev, &bp->napi); |
2610 | } | 2610 | } |
2611 | 2611 | ||
2612 | return IRQ_HANDLED; | 2612 | return IRQ_HANDLED; |
@@ -2632,12 +2632,14 @@ bnx2_has_work(struct bnx2 *bp) | |||
2632 | } | 2632 | } |
2633 | 2633 | ||
2634 | static int | 2634 | static int |
2635 | bnx2_poll(struct net_device *dev, int *budget) | 2635 | bnx2_poll(struct napi_struct *napi, int budget) |
2636 | { | 2636 | { |
2637 | struct bnx2 *bp = netdev_priv(dev); | 2637 | struct bnx2 *bp = container_of(napi, struct bnx2, napi); |
2638 | struct net_device *dev = bp->dev; | ||
2638 | struct status_block *sblk = bp->status_blk; | 2639 | struct status_block *sblk = bp->status_blk; |
2639 | u32 status_attn_bits = sblk->status_attn_bits; | 2640 | u32 status_attn_bits = sblk->status_attn_bits; |
2640 | u32 status_attn_bits_ack = sblk->status_attn_bits_ack; | 2641 | u32 status_attn_bits_ack = sblk->status_attn_bits_ack; |
2642 | int work_done = 0; | ||
2641 | 2643 | ||
2642 | if ((status_attn_bits & STATUS_ATTN_EVENTS) != | 2644 | if ((status_attn_bits & STATUS_ATTN_EVENTS) != |
2643 | (status_attn_bits_ack & STATUS_ATTN_EVENTS)) { | 2645 | (status_attn_bits_ack & STATUS_ATTN_EVENTS)) { |
@@ -2655,23 +2657,14 @@ bnx2_poll(struct net_device *dev, int *budget) | |||
2655 | if (bp->status_blk->status_tx_quick_consumer_index0 != bp->hw_tx_cons) | 2657 | if (bp->status_blk->status_tx_quick_consumer_index0 != bp->hw_tx_cons) |
2656 | bnx2_tx_int(bp); | 2658 | bnx2_tx_int(bp); |
2657 | 2659 | ||
2658 | if (bp->status_blk->status_rx_quick_consumer_index0 != bp->hw_rx_cons) { | 2660 | if (bp->status_blk->status_rx_quick_consumer_index0 != bp->hw_rx_cons) |
2659 | int orig_budget = *budget; | 2661 | work_done = bnx2_rx_int(bp, budget); |
2660 | int work_done; | ||
2661 | |||
2662 | if (orig_budget > dev->quota) | ||
2663 | orig_budget = dev->quota; | ||
2664 | |||
2665 | work_done = bnx2_rx_int(bp, orig_budget); | ||
2666 | *budget -= work_done; | ||
2667 | dev->quota -= work_done; | ||
2668 | } | ||
2669 | 2662 | ||
2670 | bp->last_status_idx = bp->status_blk->status_idx; | 2663 | bp->last_status_idx = bp->status_blk->status_idx; |
2671 | rmb(); | 2664 | rmb(); |
2672 | 2665 | ||
2673 | if (!bnx2_has_work(bp)) { | 2666 | if (!bnx2_has_work(bp)) { |
2674 | netif_rx_complete(dev); | 2667 | netif_rx_complete(dev, napi); |
2675 | if (likely(bp->flags & USING_MSI_FLAG)) { | 2668 | if (likely(bp->flags & USING_MSI_FLAG)) { |
2676 | REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, | 2669 | REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, |
2677 | BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | | 2670 | BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | |
@@ -2686,10 +2679,9 @@ bnx2_poll(struct net_device *dev, int *budget) | |||
2686 | REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, | 2679 | REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, |
2687 | BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | | 2680 | BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | |
2688 | bp->last_status_idx); | 2681 | bp->last_status_idx); |
2689 | return 0; | ||
2690 | } | 2682 | } |
2691 | 2683 | ||
2692 | return 1; | 2684 | return work_done; |
2693 | } | 2685 | } |
2694 | 2686 | ||
2695 | /* Called with rtnl_lock from vlan functions and also netif_tx_lock | 2687 | /* Called with rtnl_lock from vlan functions and also netif_tx_lock |
@@ -5039,6 +5031,8 @@ bnx2_open(struct net_device *dev) | |||
5039 | if (rc) | 5031 | if (rc) |
5040 | return rc; | 5032 | return rc; |
5041 | 5033 | ||
5034 | napi_enable(&bp->napi); | ||
5035 | |||
5042 | if ((bp->flags & MSI_CAP_FLAG) && !disable_msi) { | 5036 | if ((bp->flags & MSI_CAP_FLAG) && !disable_msi) { |
5043 | if (pci_enable_msi(bp->pdev) == 0) { | 5037 | if (pci_enable_msi(bp->pdev) == 0) { |
5044 | bp->flags |= USING_MSI_FLAG; | 5038 | bp->flags |= USING_MSI_FLAG; |
@@ -5049,6 +5043,7 @@ bnx2_open(struct net_device *dev) | |||
5049 | rc = bnx2_request_irq(bp); | 5043 | rc = bnx2_request_irq(bp); |
5050 | 5044 | ||
5051 | if (rc) { | 5045 | if (rc) { |
5046 | napi_disable(&bp->napi); | ||
5052 | bnx2_free_mem(bp); | 5047 | bnx2_free_mem(bp); |
5053 | return rc; | 5048 | return rc; |
5054 | } | 5049 | } |
@@ -5056,6 +5051,7 @@ bnx2_open(struct net_device *dev) | |||
5056 | rc = bnx2_init_nic(bp); | 5051 | rc = bnx2_init_nic(bp); |
5057 | 5052 | ||
5058 | if (rc) { | 5053 | if (rc) { |
5054 | napi_disable(&bp->napi); | ||
5059 | bnx2_free_irq(bp); | 5055 | bnx2_free_irq(bp); |
5060 | bnx2_free_skbs(bp); | 5056 | bnx2_free_skbs(bp); |
5061 | bnx2_free_mem(bp); | 5057 | bnx2_free_mem(bp); |
@@ -5088,6 +5084,7 @@ bnx2_open(struct net_device *dev) | |||
5088 | rc = bnx2_request_irq(bp); | 5084 | rc = bnx2_request_irq(bp); |
5089 | 5085 | ||
5090 | if (rc) { | 5086 | if (rc) { |
5087 | napi_disable(&bp->napi); | ||
5091 | bnx2_free_skbs(bp); | 5088 | bnx2_free_skbs(bp); |
5092 | bnx2_free_mem(bp); | 5089 | bnx2_free_mem(bp); |
5093 | del_timer_sync(&bp->timer); | 5090 | del_timer_sync(&bp->timer); |
@@ -5301,7 +5298,8 @@ bnx2_close(struct net_device *dev) | |||
5301 | while (bp->in_reset_task) | 5298 | while (bp->in_reset_task) |
5302 | msleep(1); | 5299 | msleep(1); |
5303 | 5300 | ||
5304 | bnx2_netif_stop(bp); | 5301 | bnx2_disable_int_sync(bp); |
5302 | napi_disable(&bp->napi); | ||
5305 | del_timer_sync(&bp->timer); | 5303 | del_timer_sync(&bp->timer); |
5306 | if (bp->flags & NO_WOL_FLAG) | 5304 | if (bp->flags & NO_WOL_FLAG) |
5307 | reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN; | 5305 | reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN; |
@@ -6858,11 +6856,10 @@ bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
6858 | #ifdef BCM_VLAN | 6856 | #ifdef BCM_VLAN |
6859 | dev->vlan_rx_register = bnx2_vlan_rx_register; | 6857 | dev->vlan_rx_register = bnx2_vlan_rx_register; |
6860 | #endif | 6858 | #endif |
6861 | dev->poll = bnx2_poll; | ||
6862 | dev->ethtool_ops = &bnx2_ethtool_ops; | 6859 | dev->ethtool_ops = &bnx2_ethtool_ops; |
6863 | dev->weight = 64; | ||
6864 | 6860 | ||
6865 | bp = netdev_priv(dev); | 6861 | bp = netdev_priv(dev); |
6862 | netif_napi_add(dev, &bp->napi, bnx2_poll, 64); | ||
6866 | 6863 | ||
6867 | #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER) | 6864 | #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER) |
6868 | dev->poll_controller = poll_bnx2; | 6865 | dev->poll_controller = poll_bnx2; |
diff --git a/drivers/net/bnx2.h b/drivers/net/bnx2.h index 102adfe1e923..fbae439db647 100644 --- a/drivers/net/bnx2.h +++ b/drivers/net/bnx2.h | |||
@@ -6473,6 +6473,8 @@ struct bnx2 { | |||
6473 | struct net_device *dev; | 6473 | struct net_device *dev; |
6474 | struct pci_dev *pdev; | 6474 | struct pci_dev *pdev; |
6475 | 6475 | ||
6476 | struct napi_struct napi; | ||
6477 | |||
6476 | atomic_t intr_sem; | 6478 | atomic_t intr_sem; |
6477 | 6479 | ||
6478 | struct status_block *status_blk; | 6480 | struct status_block *status_blk; |
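The bnx2 hunk gives the clearest before/after view of the budget accounting. The old interface clipped its work against both *budget and dev->quota, decremented each, and signalled "more work" with a nonzero return; the new one just passes budget down and reports the work done. Side by side (sketch; foo_rx_int() stands in for any rx completion routine):

        /* old interface, removed by this series */
        static int foo_poll_old(struct net_device *dev, int *budget)
        {
                int orig_budget = min(*budget, dev->quota);
                int work_done = foo_rx_int(dev, orig_budget);

                *budget -= work_done;
                dev->quota -= work_done;
                if (work_done >= orig_budget)
                        return 1;               /* not done, poll again */
                netif_rx_complete(dev);
                return 0;                       /* done */
        }

        /* new interface */
        static int foo_poll(struct napi_struct *napi, int budget)
        {
                struct foo_priv *fp = container_of(napi, struct foo_priv, napi);
                int work_done = foo_rx_int(fp->dev, budget);

                if (work_done < budget)
                        netif_rx_complete(fp->dev, napi);
                return work_done;       /* the core does the accounting */
        }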
diff --git a/drivers/net/cassini.c b/drivers/net/cassini.c index f6e4030c73d1..13f14df21e6e 100644 --- a/drivers/net/cassini.c +++ b/drivers/net/cassini.c | |||
@@ -2485,7 +2485,7 @@ static irqreturn_t cas_interruptN(int irq, void *dev_id) | |||
2485 | if (status & INTR_RX_DONE_ALT) { /* handle rx separately */ | 2485 | if (status & INTR_RX_DONE_ALT) { /* handle rx separately */ |
2486 | #ifdef USE_NAPI | 2486 | #ifdef USE_NAPI |
2487 | cas_mask_intr(cp); | 2487 | cas_mask_intr(cp); |
2488 | netif_rx_schedule(dev); | 2488 | netif_rx_schedule(dev, &cp->napi); |
2489 | #else | 2489 | #else |
2490 | cas_rx_ringN(cp, ring, 0); | 2490 | cas_rx_ringN(cp, ring, 0); |
2491 | #endif | 2491 | #endif |
@@ -2536,7 +2536,7 @@ static irqreturn_t cas_interrupt1(int irq, void *dev_id) | |||
2536 | if (status & INTR_RX_DONE_ALT) { /* handle rx separately */ | 2536 | if (status & INTR_RX_DONE_ALT) { /* handle rx separately */ |
2537 | #ifdef USE_NAPI | 2537 | #ifdef USE_NAPI |
2538 | cas_mask_intr(cp); | 2538 | cas_mask_intr(cp); |
2539 | netif_rx_schedule(dev); | 2539 | netif_rx_schedule(dev, &cp->napi); |
2540 | #else | 2540 | #else |
2541 | cas_rx_ringN(cp, 1, 0); | 2541 | cas_rx_ringN(cp, 1, 0); |
2542 | #endif | 2542 | #endif |
@@ -2592,7 +2592,7 @@ static irqreturn_t cas_interrupt(int irq, void *dev_id) | |||
2592 | if (status & INTR_RX_DONE) { | 2592 | if (status & INTR_RX_DONE) { |
2593 | #ifdef USE_NAPI | 2593 | #ifdef USE_NAPI |
2594 | cas_mask_intr(cp); | 2594 | cas_mask_intr(cp); |
2595 | netif_rx_schedule(dev); | 2595 | netif_rx_schedule(dev, &cp->napi); |
2596 | #else | 2596 | #else |
2597 | cas_rx_ringN(cp, 0, 0); | 2597 | cas_rx_ringN(cp, 0, 0); |
2598 | #endif | 2598 | #endif |
@@ -2607,9 +2607,10 @@ static irqreturn_t cas_interrupt(int irq, void *dev_id) | |||
2607 | 2607 | ||
2608 | 2608 | ||
2609 | #ifdef USE_NAPI | 2609 | #ifdef USE_NAPI |
2610 | static int cas_poll(struct net_device *dev, int *budget) | 2610 | static int cas_poll(struct napi_struct *napi, int budget) |
2611 | { | 2611 | { |
2612 | struct cas *cp = netdev_priv(dev); | 2612 | struct cas *cp = container_of(napi, struct cas, napi); |
2613 | struct net_device *dev = cp->dev; | ||
2613 | int i, enable_intr, todo, credits; | 2614 | int i, enable_intr, todo, credits; |
2614 | u32 status = readl(cp->regs + REG_INTR_STATUS); | 2615 | u32 status = readl(cp->regs + REG_INTR_STATUS); |
2615 | unsigned long flags; | 2616 | unsigned long flags; |
@@ -2620,20 +2621,18 @@ static int cas_poll(struct net_device *dev, int *budget) | |||
2620 | 2621 | ||
2621 | /* NAPI rx packets. we spread the credits across all of the | 2622 | /* NAPI rx packets. we spread the credits across all of the |
2622 | * rxc rings | 2623 | * rxc rings |
2623 | */ | 2624 | * |
2624 | todo = min(*budget, dev->quota); | 2625 | * to make sure we're fair with the work we loop through each |
2625 | |||
2626 | /* to make sure we're fair with the work we loop through each | ||
2627 | * ring N_RX_COMP_RING times with a request of | 2626 | * ring N_RX_COMP_RING times with a request of |
2628 | * todo / N_RX_COMP_RINGS | 2627 | * budget / N_RX_COMP_RINGS |
2629 | */ | 2628 | */ |
2630 | enable_intr = 1; | 2629 | enable_intr = 1; |
2631 | credits = 0; | 2630 | credits = 0; |
2632 | for (i = 0; i < N_RX_COMP_RINGS; i++) { | 2631 | for (i = 0; i < N_RX_COMP_RINGS; i++) { |
2633 | int j; | 2632 | int j; |
2634 | for (j = 0; j < N_RX_COMP_RINGS; j++) { | 2633 | for (j = 0; j < N_RX_COMP_RINGS; j++) { |
2635 | credits += cas_rx_ringN(cp, j, todo / N_RX_COMP_RINGS); | 2634 | credits += cas_rx_ringN(cp, j, budget / N_RX_COMP_RINGS); |
2636 | if (credits >= todo) { | 2635 | if (credits >= budget) { |
2637 | enable_intr = 0; | 2636 | enable_intr = 0; |
2638 | goto rx_comp; | 2637 | goto rx_comp; |
2639 | } | 2638 | } |
@@ -2641,9 +2640,6 @@ static int cas_poll(struct net_device *dev, int *budget) | |||
2641 | } | 2640 | } |
2642 | 2641 | ||
2643 | rx_comp: | 2642 | rx_comp: |
2644 | *budget -= credits; | ||
2645 | dev->quota -= credits; | ||
2646 | |||
2647 | /* final rx completion */ | 2643 | /* final rx completion */ |
2648 | spin_lock_irqsave(&cp->lock, flags); | 2644 | spin_lock_irqsave(&cp->lock, flags); |
2649 | if (status) | 2645 | if (status) |
@@ -2674,11 +2670,10 @@ rx_comp: | |||
2674 | #endif | 2670 | #endif |
2675 | spin_unlock_irqrestore(&cp->lock, flags); | 2671 | spin_unlock_irqrestore(&cp->lock, flags); |
2676 | if (enable_intr) { | 2672 | if (enable_intr) { |
2677 | netif_rx_complete(dev); | 2673 | netif_rx_complete(dev, napi); |
2678 | cas_unmask_intr(cp); | 2674 | cas_unmask_intr(cp); |
2679 | return 0; | ||
2680 | } | 2675 | } |
2681 | return 1; | 2676 | return credits; |
2682 | } | 2677 | } |
2683 | #endif | 2678 | #endif |
2684 | 2679 | ||
@@ -4351,6 +4346,9 @@ static int cas_open(struct net_device *dev) | |||
4351 | goto err_spare; | 4346 | goto err_spare; |
4352 | } | 4347 | } |
4353 | 4348 | ||
4349 | #ifdef USE_NAPI | ||
4350 | napi_enable(&cp->napi); | ||
4351 | #endif | ||
4354 | /* init hw */ | 4352 | /* init hw */ |
4355 | cas_lock_all_save(cp, flags); | 4353 | cas_lock_all_save(cp, flags); |
4356 | cas_clean_rings(cp); | 4354 | cas_clean_rings(cp); |
@@ -4376,6 +4374,9 @@ static int cas_close(struct net_device *dev) | |||
4376 | unsigned long flags; | 4374 | unsigned long flags; |
4377 | struct cas *cp = netdev_priv(dev); | 4375 | struct cas *cp = netdev_priv(dev); |
4378 | 4376 | ||
4377 | #ifdef USE_NAPI | ||
4378 | napi_disable(&cp->napi); | ||
4379 | #endif | ||
4379 | /* Make sure we don't get distracted by suspend/resume */ | 4380 | /* Make sure we don't get distracted by suspend/resume */ |
4380 | mutex_lock(&cp->pm_mutex); | 4381 | mutex_lock(&cp->pm_mutex); |
4381 | 4382 | ||
@@ -5062,8 +5063,7 @@ static int __devinit cas_init_one(struct pci_dev *pdev, | |||
5062 | dev->watchdog_timeo = CAS_TX_TIMEOUT; | 5063 | dev->watchdog_timeo = CAS_TX_TIMEOUT; |
5063 | dev->change_mtu = cas_change_mtu; | 5064 | dev->change_mtu = cas_change_mtu; |
5064 | #ifdef USE_NAPI | 5065 | #ifdef USE_NAPI |
5065 | dev->poll = cas_poll; | 5066 | netif_napi_add(dev, &cp->napi, cas_poll, 64); |
5066 | dev->weight = 64; | ||
5067 | #endif | 5067 | #endif |
5068 | #ifdef CONFIG_NET_POLL_CONTROLLER | 5068 | #ifdef CONFIG_NET_POLL_CONTROLLER |
5069 | dev->poll_controller = cas_netpoll; | 5069 | dev->poll_controller = cas_netpoll; |
diff --git a/drivers/net/cassini.h b/drivers/net/cassini.h index a970804487c7..2f93f83342d2 100644 --- a/drivers/net/cassini.h +++ b/drivers/net/cassini.h | |||
@@ -4280,6 +4280,8 @@ struct cas { | |||
4280 | int rx_cur[N_RX_COMP_RINGS], rx_new[N_RX_COMP_RINGS]; | 4280 | int rx_cur[N_RX_COMP_RINGS], rx_new[N_RX_COMP_RINGS]; |
4281 | int rx_last[N_RX_DESC_RINGS]; | 4281 | int rx_last[N_RX_DESC_RINGS]; |
4282 | 4282 | ||
4283 | struct napi_struct napi; | ||
4284 | |||
4283 | /* Set when chip is actually in operational state | 4285 | /* Set when chip is actually in operational state |
4284 | * (ie. not power managed) */ | 4286 | * (ie. not power managed) */ |
4285 | int hw_running; | 4287 | int hw_running; |
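cassini services several rx completion rings from a single poll, so the budget is shared out rather than handed to one ring: each ring gets budget / N_RX_COMP_RINGS per pass, and polling stops early once the accumulated credits reach the budget. The same idea in sketch form (FOO_NR_RINGS and foo_rx_ring() are illustrative names):

        static int foo_poll_rings(struct napi_struct *napi, int budget)
        {
                struct foo_priv *fp = container_of(napi, struct foo_priv, napi);
                int i, credits = 0, enable_intr = 1;

                for (i = 0; i < FOO_NR_RINGS; i++) {
                        /* equal slice of the budget for each ring */
                        credits += foo_rx_ring(fp, i, budget / FOO_NR_RINGS);
                        if (credits >= budget) {
                                enable_intr = 0;        /* budget spent, stay in polled mode */
                                break;
                        }
                }

                if (enable_intr) {
                        netif_rx_complete(fp->dev, napi);
                        foo_unmask_intr(fp);
                }
                return credits;
        }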
diff --git a/drivers/net/chelsio/common.h b/drivers/net/chelsio/common.h index 8ba702c8b560..b5de4452cf24 100644 --- a/drivers/net/chelsio/common.h +++ b/drivers/net/chelsio/common.h | |||
@@ -278,6 +278,7 @@ struct adapter { | |||
278 | struct peespi *espi; | 278 | struct peespi *espi; |
279 | struct petp *tp; | 279 | struct petp *tp; |
280 | 280 | ||
281 | struct napi_struct napi; | ||
281 | struct port_info port[MAX_NPORTS]; | 282 | struct port_info port[MAX_NPORTS]; |
282 | struct delayed_work stats_update_task; | 283 | struct delayed_work stats_update_task; |
283 | struct timer_list stats_update_timer; | 284 | struct timer_list stats_update_timer; |
diff --git a/drivers/net/chelsio/cxgb2.c b/drivers/net/chelsio/cxgb2.c index 231ce43b97cf..593736c7550d 100644 --- a/drivers/net/chelsio/cxgb2.c +++ b/drivers/net/chelsio/cxgb2.c | |||
@@ -255,8 +255,11 @@ static int cxgb_open(struct net_device *dev) | |||
255 | struct adapter *adapter = dev->priv; | 255 | struct adapter *adapter = dev->priv; |
256 | int other_ports = adapter->open_device_map & PORT_MASK; | 256 | int other_ports = adapter->open_device_map & PORT_MASK; |
257 | 257 | ||
258 | if (!adapter->open_device_map && (err = cxgb_up(adapter)) < 0) | 258 | napi_enable(&adapter->napi); |
259 | if (!adapter->open_device_map && (err = cxgb_up(adapter)) < 0) { | ||
260 | napi_disable(&adapter->napi); | ||
259 | return err; | 261 | return err; |
262 | } | ||
260 | 263 | ||
261 | __set_bit(dev->if_port, &adapter->open_device_map); | 264 | __set_bit(dev->if_port, &adapter->open_device_map); |
262 | link_start(&adapter->port[dev->if_port]); | 265 | link_start(&adapter->port[dev->if_port]); |
@@ -274,6 +277,7 @@ static int cxgb_close(struct net_device *dev) | |||
274 | struct cmac *mac = p->mac; | 277 | struct cmac *mac = p->mac; |
275 | 278 | ||
276 | netif_stop_queue(dev); | 279 | netif_stop_queue(dev); |
280 | napi_disable(&adapter->napi); | ||
277 | mac->ops->disable(mac, MAC_DIRECTION_TX | MAC_DIRECTION_RX); | 281 | mac->ops->disable(mac, MAC_DIRECTION_TX | MAC_DIRECTION_RX); |
278 | netif_carrier_off(dev); | 282 | netif_carrier_off(dev); |
279 | 283 | ||
@@ -1113,8 +1117,7 @@ static int __devinit init_one(struct pci_dev *pdev, | |||
1113 | netdev->poll_controller = t1_netpoll; | 1117 | netdev->poll_controller = t1_netpoll; |
1114 | #endif | 1118 | #endif |
1115 | #ifdef CONFIG_CHELSIO_T1_NAPI | 1119 | #ifdef CONFIG_CHELSIO_T1_NAPI |
1116 | netdev->weight = 64; | 1120 | netif_napi_add(netdev, &adapter->napi, t1_poll, 64); |
1117 | netdev->poll = t1_poll; | ||
1118 | #endif | 1121 | #endif |
1119 | 1122 | ||
1120 | SET_ETHTOOL_OPS(netdev, &t1_ethtool_ops); | 1123 | SET_ETHTOOL_OPS(netdev, &t1_ethtool_ops); |
diff --git a/drivers/net/chelsio/sge.c b/drivers/net/chelsio/sge.c index e4f874a70fe5..ffa7e649a6ef 100644 --- a/drivers/net/chelsio/sge.c +++ b/drivers/net/chelsio/sge.c | |||
@@ -1620,23 +1620,20 @@ static int process_pure_responses(struct adapter *adapter) | |||
1620 | * or protection from interrupts as data interrupts are off at this point and | 1620 | * or protection from interrupts as data interrupts are off at this point and |
1621 | * other adapter interrupts do not interfere. | 1621 | * other adapter interrupts do not interfere. |
1622 | */ | 1622 | */ |
1623 | int t1_poll(struct net_device *dev, int *budget) | 1623 | int t1_poll(struct napi_struct *napi, int budget) |
1624 | { | 1624 | { |
1625 | struct adapter *adapter = dev->priv; | 1625 | struct adapter *adapter = container_of(napi, struct adapter, napi); |
1626 | struct net_device *dev = adapter->port[0].dev; | ||
1626 | int work_done; | 1627 | int work_done; |
1627 | 1628 | ||
1628 | work_done = process_responses(adapter, min(*budget, dev->quota)); | 1629 | work_done = process_responses(adapter, budget); |
1629 | *budget -= work_done; | ||
1630 | dev->quota -= work_done; | ||
1631 | |||
1632 | if (unlikely(responses_pending(adapter))) | ||
1633 | return 1; | ||
1634 | |||
1635 | netif_rx_complete(dev); | ||
1636 | writel(adapter->sge->respQ.cidx, adapter->regs + A_SG_SLEEPING); | ||
1637 | |||
1638 | return 0; | ||
1639 | 1630 | ||
1631 | if (likely(!responses_pending(adapter))) { | ||
1632 | netif_rx_complete(dev, napi); | ||
1633 | writel(adapter->sge->respQ.cidx, | ||
1634 | adapter->regs + A_SG_SLEEPING); | ||
1635 | } | ||
1636 | return work_done; | ||
1640 | } | 1637 | } |
1641 | 1638 | ||
1642 | /* | 1639 | /* |
@@ -1653,13 +1650,13 @@ irqreturn_t t1_interrupt(int irq, void *data) | |||
1653 | 1650 | ||
1654 | writel(F_PL_INTR_SGE_DATA, adapter->regs + A_PL_CAUSE); | 1651 | writel(F_PL_INTR_SGE_DATA, adapter->regs + A_PL_CAUSE); |
1655 | 1652 | ||
1656 | if (__netif_rx_schedule_prep(dev)) { | 1653 | if (napi_schedule_prep(&adapter->napi)) { |
1657 | if (process_pure_responses(adapter)) | 1654 | if (process_pure_responses(adapter)) |
1658 | __netif_rx_schedule(dev); | 1655 | __netif_rx_schedule(dev, &adapter->napi); |
1659 | else { | 1656 | else { |
1660 | /* no data, no NAPI needed */ | 1657 | /* no data, no NAPI needed */ |
1661 | writel(sge->respQ.cidx, adapter->regs + A_SG_SLEEPING); | 1658 | writel(sge->respQ.cidx, adapter->regs + A_SG_SLEEPING); |
1662 | netif_poll_enable(dev); /* undo schedule_prep */ | 1659 | napi_enable(&adapter->napi); /* undo schedule_prep */ |
1663 | } | 1660 | } |
1664 | } | 1661 | } |
1665 | return IRQ_HANDLED; | 1662 | return IRQ_HANDLED; |
diff --git a/drivers/net/chelsio/sge.h b/drivers/net/chelsio/sge.h index d132a0ef2a22..713d9c55f24d 100644 --- a/drivers/net/chelsio/sge.h +++ b/drivers/net/chelsio/sge.h | |||
@@ -77,7 +77,7 @@ int t1_sge_configure(struct sge *, struct sge_params *); | |||
77 | int t1_sge_set_coalesce_params(struct sge *, struct sge_params *); | 77 | int t1_sge_set_coalesce_params(struct sge *, struct sge_params *); |
78 | void t1_sge_destroy(struct sge *); | 78 | void t1_sge_destroy(struct sge *); |
79 | irqreturn_t t1_interrupt(int irq, void *cookie); | 79 | irqreturn_t t1_interrupt(int irq, void *cookie); |
80 | int t1_poll(struct net_device *, int *); | 80 | int t1_poll(struct napi_struct *, int); |
81 | 81 | ||
82 | int t1_start_xmit(struct sk_buff *skb, struct net_device *dev); | 82 | int t1_start_xmit(struct sk_buff *skb, struct net_device *dev); |
83 | void t1_set_vlan_accel(struct adapter *adapter, int on_off); | 83 | void t1_set_vlan_accel(struct adapter *adapter, int on_off); |
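chelsio's t1_interrupt() shows the claim-first variant: napi_schedule_prep(), which takes only the napi_struct, reserves the poll before the handler knows whether there is any data work, and if there is none the reservation is dropped again with napi_enable(), which clears the scheduled bit. Sketched (foo_has_data_responses() and the adapter layout are hypothetical):

        static irqreturn_t foo_interrupt(int irq, void *cookie)
        {
                struct foo_adapter *adapter = cookie;

                foo_ack_intr(adapter);

                if (napi_schedule_prep(&adapter->napi)) {
                        if (foo_has_data_responses(adapter))
                                __netif_rx_schedule(adapter->port_dev,
                                                    &adapter->napi);
                        else
                                napi_enable(&adapter->napi); /* undo schedule_prep */
                }
                return IRQ_HANDLED;
        }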
diff --git a/drivers/net/cxgb3/adapter.h b/drivers/net/cxgb3/adapter.h index 20e887de2545..044261703381 100644 --- a/drivers/net/cxgb3/adapter.h +++ b/drivers/net/cxgb3/adapter.h | |||
@@ -49,11 +49,13 @@ | |||
49 | typedef irqreturn_t(*intr_handler_t) (int, void *); | 49 | typedef irqreturn_t(*intr_handler_t) (int, void *); |
50 | 50 | ||
51 | struct vlan_group; | 51 | struct vlan_group; |
52 | |||
53 | struct adapter; | 52 | struct adapter; |
53 | struct sge_qset; | ||
54 | |||
54 | struct port_info { | 55 | struct port_info { |
55 | struct adapter *adapter; | 56 | struct adapter *adapter; |
56 | struct vlan_group *vlan_grp; | 57 | struct vlan_group *vlan_grp; |
58 | struct sge_qset *qs; | ||
57 | const struct port_type_info *port_type; | 59 | const struct port_type_info *port_type; |
58 | u8 port_id; | 60 | u8 port_id; |
59 | u8 rx_csum_offload; | 61 | u8 rx_csum_offload; |
@@ -173,10 +175,12 @@ enum { /* per port SGE statistics */ | |||
173 | }; | 175 | }; |
174 | 176 | ||
175 | struct sge_qset { /* an SGE queue set */ | 177 | struct sge_qset { /* an SGE queue set */ |
178 | struct adapter *adap; | ||
179 | struct napi_struct napi; | ||
176 | struct sge_rspq rspq; | 180 | struct sge_rspq rspq; |
177 | struct sge_fl fl[SGE_RXQ_PER_SET]; | 181 | struct sge_fl fl[SGE_RXQ_PER_SET]; |
178 | struct sge_txq txq[SGE_TXQ_PER_SET]; | 182 | struct sge_txq txq[SGE_TXQ_PER_SET]; |
179 | struct net_device *netdev; /* associated net device */ | 183 | struct net_device *netdev; |
180 | unsigned long txq_stopped; /* which Tx queues are stopped */ | 184 | unsigned long txq_stopped; /* which Tx queues are stopped */ |
181 | struct timer_list tx_reclaim_timer; /* reclaims TX buffers */ | 185 | struct timer_list tx_reclaim_timer; /* reclaims TX buffers */ |
182 | unsigned long port_stats[SGE_PSTAT_MAX]; | 186 | unsigned long port_stats[SGE_PSTAT_MAX]; |
@@ -221,12 +225,6 @@ struct adapter { | |||
221 | struct delayed_work adap_check_task; | 225 | struct delayed_work adap_check_task; |
222 | struct work_struct ext_intr_handler_task; | 226 | struct work_struct ext_intr_handler_task; |
223 | 227 | ||
224 | /* | ||
225 | * Dummy netdevices are needed when using multiple receive queues with | ||
226 | * NAPI as each netdevice can service only one queue. | ||
227 | */ | ||
228 | struct net_device *dummy_netdev[SGE_QSETS - 1]; | ||
229 | |||
230 | struct dentry *debugfs_root; | 228 | struct dentry *debugfs_root; |
231 | 229 | ||
232 | struct mutex mdio_lock; | 230 | struct mutex mdio_lock; |
@@ -253,12 +251,6 @@ static inline struct port_info *adap2pinfo(struct adapter *adap, int idx) | |||
253 | return netdev_priv(adap->port[idx]); | 251 | return netdev_priv(adap->port[idx]); |
254 | } | 252 | } |
255 | 253 | ||
256 | /* | ||
257 | * We use the spare atalk_ptr to map a net device to its SGE queue set. | ||
258 | * This is a macro so it can be used as l-value. | ||
259 | */ | ||
260 | #define dev2qset(netdev) ((netdev)->atalk_ptr) | ||
261 | |||
262 | #define OFFLOAD_DEVMAP_BIT 15 | 254 | #define OFFLOAD_DEVMAP_BIT 15 |
263 | 255 | ||
264 | #define tdev2adap(d) container_of(d, struct adapter, tdev) | 256 | #define tdev2adap(d) container_of(d, struct adapter, tdev) |
@@ -284,7 +276,7 @@ int t3_mgmt_tx(struct adapter *adap, struct sk_buff *skb); | |||
284 | void t3_update_qset_coalesce(struct sge_qset *qs, const struct qset_params *p); | 276 | void t3_update_qset_coalesce(struct sge_qset *qs, const struct qset_params *p); |
285 | int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports, | 277 | int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports, |
286 | int irq_vec_idx, const struct qset_params *p, | 278 | int irq_vec_idx, const struct qset_params *p, |
287 | int ntxq, struct net_device *netdev); | 279 | int ntxq, struct net_device *dev); |
288 | int t3_get_desc(const struct sge_qset *qs, unsigned int qnum, unsigned int idx, | 280 | int t3_get_desc(const struct sge_qset *qs, unsigned int qnum, unsigned int idx, |
289 | unsigned char *data); | 281 | unsigned char *data); |
290 | irqreturn_t t3_sge_intr_msix(int irq, void *cookie); | 282 | irqreturn_t t3_sge_intr_msix(int irq, void *cookie); |
diff --git a/drivers/net/cxgb3/cxgb3_main.c b/drivers/net/cxgb3/cxgb3_main.c index 5ab319cfe5de..5db7d4e27ec0 100644 --- a/drivers/net/cxgb3/cxgb3_main.c +++ b/drivers/net/cxgb3/cxgb3_main.c | |||
@@ -339,49 +339,17 @@ static void setup_rss(struct adapter *adap) | |||
339 | V_RRCPLCPUSIZE(6), cpus, rspq_map); | 339 | V_RRCPLCPUSIZE(6), cpus, rspq_map); |
340 | } | 340 | } |
341 | 341 | ||
342 | /* | 342 | static void init_napi(struct adapter *adap) |
343 | * If we have multiple receive queues per port serviced by NAPI we need one | ||
344 | * netdevice per queue as NAPI operates on netdevices. We already have one | ||
345 | * netdevice, namely the one associated with the interface, so we use dummy | ||
346 | * ones for any additional queues. Note that these netdevices exist purely | ||
347 | * so that NAPI has something to work with, they do not represent network | ||
348 | * ports and are not registered. | ||
349 | */ | ||
350 | static int init_dummy_netdevs(struct adapter *adap) | ||
351 | { | 343 | { |
352 | int i, j, dummy_idx = 0; | 344 | int i; |
353 | struct net_device *nd; | ||
354 | |||
355 | for_each_port(adap, i) { | ||
356 | struct net_device *dev = adap->port[i]; | ||
357 | const struct port_info *pi = netdev_priv(dev); | ||
358 | |||
359 | for (j = 0; j < pi->nqsets - 1; j++) { | ||
360 | if (!adap->dummy_netdev[dummy_idx]) { | ||
361 | struct port_info *p; | ||
362 | |||
363 | nd = alloc_netdev(sizeof(*p), "", ether_setup); | ||
364 | if (!nd) | ||
365 | goto free_all; | ||
366 | 345 | ||
367 | p = netdev_priv(nd); | 346 | for (i = 0; i < SGE_QSETS; i++) { |
368 | p->adapter = adap; | 347 | struct sge_qset *qs = &adap->sge.qs[i]; |
369 | nd->weight = 64; | ||
370 | set_bit(__LINK_STATE_START, &nd->state); | ||
371 | adap->dummy_netdev[dummy_idx] = nd; | ||
372 | } | ||
373 | strcpy(adap->dummy_netdev[dummy_idx]->name, dev->name); | ||
374 | dummy_idx++; | ||
375 | } | ||
376 | } | ||
377 | return 0; | ||
378 | 348 | ||
379 | free_all: | 349 | if (qs->adap) |
380 | while (--dummy_idx >= 0) { | 350 | netif_napi_add(qs->netdev, &qs->napi, qs->napi.poll, |
381 | free_netdev(adap->dummy_netdev[dummy_idx]); | 351 | 64); |
382 | adap->dummy_netdev[dummy_idx] = NULL; | ||
383 | } | 352 | } |
384 | return -ENOMEM; | ||
385 | } | 353 | } |
386 | 354 | ||
387 | /* | 355 | /* |
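The deleted comment above explains why cxgb3 needed dummy netdevices at all: the old NAPI could poll only one queue per net_device. With napi_struct split out, the replacement init_napi() embeds one napi_struct in each SGE queue set and attaches them all to the real, registered netdevice. In outline (hypothetical multi-queue driver):

        static int foo_qset_poll(struct napi_struct *napi, int budget);

        struct foo_qset {
                struct foo_adapter *adap;
                struct napi_struct napi;        /* one NAPI context per rx queue */
                struct net_device *netdev;      /* the real netdev, shared by all qsets */
                /* ... per-queue rings ... */
        };

        static void foo_init_napi(struct foo_adapter *adap)
        {
                int i;

                /* every queue set gets its own pollable context on the
                 * same net_device; no dummy netdevs required */
                for (i = 0; i < FOO_NR_QSETS; i++)
                        netif_napi_add(adap->qs[i].netdev, &adap->qs[i].napi,
                                       foo_qset_poll, 64);
        }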
@@ -392,20 +360,18 @@ free_all: | |||
392 | static void quiesce_rx(struct adapter *adap) | 360 | static void quiesce_rx(struct adapter *adap) |
393 | { | 361 | { |
394 | int i; | 362 | int i; |
395 | struct net_device *dev; | ||
396 | 363 | ||
397 | for_each_port(adap, i) { | 364 | for (i = 0; i < SGE_QSETS; i++) |
398 | dev = adap->port[i]; | 365 | if (adap->sge.qs[i].adap) |
399 | while (test_bit(__LINK_STATE_RX_SCHED, &dev->state)) | 366 | napi_disable(&adap->sge.qs[i].napi); |
400 | msleep(1); | 367 | } |
401 | } | ||
402 | 368 | ||
403 | for (i = 0; i < ARRAY_SIZE(adap->dummy_netdev); i++) { | 369 | static void enable_all_napi(struct adapter *adap) |
404 | dev = adap->dummy_netdev[i]; | 370 | { |
405 | if (dev) | 371 | int i; |
406 | while (test_bit(__LINK_STATE_RX_SCHED, &dev->state)) | 372 | for (i = 0; i < SGE_QSETS; i++) |
407 | msleep(1); | 373 | if (adap->sge.qs[i].adap) |
408 | } | 374 | napi_enable(&adap->sge.qs[i].napi); |
409 | } | 375 | } |
410 | 376 | ||
411 | /** | 377 | /** |
@@ -418,7 +384,7 @@ static void quiesce_rx(struct adapter *adap) | |||
418 | */ | 384 | */ |
419 | static int setup_sge_qsets(struct adapter *adap) | 385 | static int setup_sge_qsets(struct adapter *adap) |
420 | { | 386 | { |
421 | int i, j, err, irq_idx = 0, qset_idx = 0, dummy_dev_idx = 0; | 387 | int i, j, err, irq_idx = 0, qset_idx = 0; |
422 | unsigned int ntxq = SGE_TXQ_PER_SET; | 388 | unsigned int ntxq = SGE_TXQ_PER_SET; |
423 | 389 | ||
424 | if (adap->params.rev > 0 && !(adap->flags & USING_MSI)) | 390 | if (adap->params.rev > 0 && !(adap->flags & USING_MSI)) |
@@ -426,15 +392,14 @@ static int setup_sge_qsets(struct adapter *adap) | |||
426 | 392 | ||
427 | for_each_port(adap, i) { | 393 | for_each_port(adap, i) { |
428 | struct net_device *dev = adap->port[i]; | 394 | struct net_device *dev = adap->port[i]; |
429 | const struct port_info *pi = netdev_priv(dev); | 395 | struct port_info *pi = netdev_priv(dev); |
430 | 396 | ||
397 | pi->qs = &adap->sge.qs[pi->first_qset]; | ||
431 | for (j = 0; j < pi->nqsets; ++j, ++qset_idx) { | 398 | for (j = 0; j < pi->nqsets; ++j, ++qset_idx) { |
432 | err = t3_sge_alloc_qset(adap, qset_idx, 1, | 399 | err = t3_sge_alloc_qset(adap, qset_idx, 1, |
433 | (adap->flags & USING_MSIX) ? qset_idx + 1 : | 400 | (adap->flags & USING_MSIX) ? qset_idx + 1 : |
434 | irq_idx, | 401 | irq_idx, |
435 | &adap->params.sge.qset[qset_idx], ntxq, | 402 | &adap->params.sge.qset[qset_idx], ntxq, dev); |
436 | j == 0 ? dev : | ||
437 | adap-> dummy_netdev[dummy_dev_idx++]); | ||
438 | if (err) { | 403 | if (err) { |
439 | t3_free_sge_resources(adap); | 404 | t3_free_sge_resources(adap); |
440 | return err; | 405 | return err; |
@@ -845,21 +810,18 @@ static int cxgb_up(struct adapter *adap) | |||
845 | goto out; | 810 | goto out; |
846 | } | 811 | } |
847 | 812 | ||
848 | err = init_dummy_netdevs(adap); | ||
849 | if (err) | ||
850 | goto out; | ||
851 | |||
852 | err = t3_init_hw(adap, 0); | 813 | err = t3_init_hw(adap, 0); |
853 | if (err) | 814 | if (err) |
854 | goto out; | 815 | goto out; |
855 | 816 | ||
856 | t3_write_reg(adap, A_ULPRX_TDDP_PSZ, V_HPZ0(PAGE_SHIFT - 12)); | 817 | t3_write_reg(adap, A_ULPRX_TDDP_PSZ, V_HPZ0(PAGE_SHIFT - 12)); |
857 | 818 | ||
858 | err = setup_sge_qsets(adap); | 819 | err = setup_sge_qsets(adap); |
859 | if (err) | 820 | if (err) |
860 | goto out; | 821 | goto out; |
861 | 822 | ||
862 | setup_rss(adap); | 823 | setup_rss(adap); |
824 | init_napi(adap); | ||
863 | adap->flags |= FULL_INIT_DONE; | 825 | adap->flags |= FULL_INIT_DONE; |
864 | } | 826 | } |
865 | 827 | ||
@@ -886,6 +848,7 @@ static int cxgb_up(struct adapter *adap) | |||
886 | adap->name, adap))) | 848 | adap->name, adap))) |
887 | goto irq_err; | 849 | goto irq_err; |
888 | 850 | ||
851 | enable_all_napi(adap); | ||
889 | t3_sge_start(adap); | 852 | t3_sge_start(adap); |
890 | t3_intr_enable(adap); | 853 | t3_intr_enable(adap); |
891 | 854 | ||
@@ -1012,8 +975,10 @@ static int cxgb_open(struct net_device *dev) | |||
1012 | int other_ports = adapter->open_device_map & PORT_MASK; | 975 | int other_ports = adapter->open_device_map & PORT_MASK; |
1013 | int err; | 976 | int err; |
1014 | 977 | ||
1015 | if (!adapter->open_device_map && (err = cxgb_up(adapter)) < 0) | 978 | if (!adapter->open_device_map && (err = cxgb_up(adapter)) < 0) { |
979 | quiesce_rx(adapter); | ||
1016 | return err; | 980 | return err; |
981 | } | ||
1017 | 982 | ||
1018 | set_bit(pi->port_id, &adapter->open_device_map); | 983 | set_bit(pi->port_id, &adapter->open_device_map); |
1019 | if (is_offload(adapter) && !ofld_disable) { | 984 | if (is_offload(adapter) && !ofld_disable) { |
@@ -2524,7 +2489,6 @@ static int __devinit init_one(struct pci_dev *pdev, | |||
2524 | #ifdef CONFIG_NET_POLL_CONTROLLER | 2489 | #ifdef CONFIG_NET_POLL_CONTROLLER |
2525 | netdev->poll_controller = cxgb_netpoll; | 2490 | netdev->poll_controller = cxgb_netpoll; |
2526 | #endif | 2491 | #endif |
2527 | netdev->weight = 64; | ||
2528 | 2492 | ||
2529 | SET_ETHTOOL_OPS(netdev, &cxgb_ethtool_ops); | 2493 | SET_ETHTOOL_OPS(netdev, &cxgb_ethtool_ops); |
2530 | } | 2494 | } |
@@ -2625,12 +2589,6 @@ static void __devexit remove_one(struct pci_dev *pdev) | |||
2625 | t3_free_sge_resources(adapter); | 2589 | t3_free_sge_resources(adapter); |
2626 | cxgb_disable_msi(adapter); | 2590 | cxgb_disable_msi(adapter); |
2627 | 2591 | ||
2628 | for (i = 0; i < ARRAY_SIZE(adapter->dummy_netdev); i++) | ||
2629 | if (adapter->dummy_netdev[i]) { | ||
2630 | free_netdev(adapter->dummy_netdev[i]); | ||
2631 | adapter->dummy_netdev[i] = NULL; | ||
2632 | } | ||
2633 | |||
2634 | for_each_port(adapter, i) | 2592 | for_each_port(adapter, i) |
2635 | if (adapter->port[i]) | 2593 | if (adapter->port[i]) |
2636 | free_netdev(adapter->port[i]); | 2594 | free_netdev(adapter->port[i]); |
diff --git a/drivers/net/cxgb3/sge.c b/drivers/net/cxgb3/sge.c index 58a5f60521ed..069c1aca8a6b 100644 --- a/drivers/net/cxgb3/sge.c +++ b/drivers/net/cxgb3/sge.c | |||
@@ -591,9 +591,6 @@ void t3_free_qset(struct adapter *adapter, struct sge_qset *q) | |||
591 | q->rspq.desc, q->rspq.phys_addr); | 591 | q->rspq.desc, q->rspq.phys_addr); |
592 | } | 592 | } |
593 | 593 | ||
594 | if (q->netdev) | ||
595 | q->netdev->atalk_ptr = NULL; | ||
596 | |||
597 | memset(q, 0, sizeof(*q)); | 594 | memset(q, 0, sizeof(*q)); |
598 | } | 595 | } |
599 | 596 | ||
@@ -1074,7 +1071,7 @@ int t3_eth_xmit(struct sk_buff *skb, struct net_device *dev) | |||
1074 | unsigned int ndesc, pidx, credits, gen, compl; | 1071 | unsigned int ndesc, pidx, credits, gen, compl; |
1075 | const struct port_info *pi = netdev_priv(dev); | 1072 | const struct port_info *pi = netdev_priv(dev); |
1076 | struct adapter *adap = pi->adapter; | 1073 | struct adapter *adap = pi->adapter; |
1077 | struct sge_qset *qs = dev2qset(dev); | 1074 | struct sge_qset *qs = pi->qs; |
1078 | struct sge_txq *q = &qs->txq[TXQ_ETH]; | 1075 | struct sge_txq *q = &qs->txq[TXQ_ETH]; |
1079 | 1076 | ||
1080 | /* | 1077 | /* |
@@ -1326,13 +1323,12 @@ static void restart_ctrlq(unsigned long data) | |||
1326 | struct sk_buff *skb; | 1323 | struct sk_buff *skb; |
1327 | struct sge_qset *qs = (struct sge_qset *)data; | 1324 | struct sge_qset *qs = (struct sge_qset *)data; |
1328 | struct sge_txq *q = &qs->txq[TXQ_CTRL]; | 1325 | struct sge_txq *q = &qs->txq[TXQ_CTRL]; |
1329 | const struct port_info *pi = netdev_priv(qs->netdev); | ||
1330 | struct adapter *adap = pi->adapter; | ||
1331 | 1326 | ||
1332 | spin_lock(&q->lock); | 1327 | spin_lock(&q->lock); |
1333 | again:reclaim_completed_tx_imm(q); | 1328 | again:reclaim_completed_tx_imm(q); |
1334 | 1329 | ||
1335 | while (q->in_use < q->size && (skb = __skb_dequeue(&q->sendq)) != NULL) { | 1330 | while (q->in_use < q->size && |
1331 | (skb = __skb_dequeue(&q->sendq)) != NULL) { | ||
1336 | 1332 | ||
1337 | write_imm(&q->desc[q->pidx], skb, skb->len, q->gen); | 1333 | write_imm(&q->desc[q->pidx], skb, skb->len, q->gen); |
1338 | 1334 | ||
@@ -1354,7 +1350,7 @@ static void restart_ctrlq(unsigned long data) | |||
1354 | } | 1350 | } |
1355 | 1351 | ||
1356 | spin_unlock(&q->lock); | 1352 | spin_unlock(&q->lock); |
1357 | t3_write_reg(adap, A_SG_KDOORBELL, | 1353 | t3_write_reg(qs->adap, A_SG_KDOORBELL, |
1358 | F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id)); | 1354 | F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id)); |
1359 | } | 1355 | } |
1360 | 1356 | ||
@@ -1638,8 +1634,7 @@ static inline void offload_enqueue(struct sge_rspq *q, struct sk_buff *skb) | |||
1638 | else { | 1634 | else { |
1639 | struct sge_qset *qs = rspq_to_qset(q); | 1635 | struct sge_qset *qs = rspq_to_qset(q); |
1640 | 1636 | ||
1641 | if (__netif_rx_schedule_prep(qs->netdev)) | 1637 | napi_schedule(&qs->napi); |
1642 | __netif_rx_schedule(qs->netdev); | ||
1643 | q->rx_head = skb; | 1638 | q->rx_head = skb; |
1644 | } | 1639 | } |
1645 | q->rx_tail = skb; | 1640 | q->rx_tail = skb; |
@@ -1675,34 +1670,30 @@ static inline void deliver_partial_bundle(struct t3cdev *tdev, | |||
1675 | * receive handler. Batches need to be of modest size as we do prefetches | 1670 | * receive handler. Batches need to be of modest size as we do prefetches |
1676 | * on the packets in each. | 1671 | * on the packets in each. |
1677 | */ | 1672 | */ |
1678 | static int ofld_poll(struct net_device *dev, int *budget) | 1673 | static int ofld_poll(struct napi_struct *napi, int budget) |
1679 | { | 1674 | { |
1680 | const struct port_info *pi = netdev_priv(dev); | 1675 | struct sge_qset *qs = container_of(napi, struct sge_qset, napi); |
1681 | struct adapter *adapter = pi->adapter; | ||
1682 | struct sge_qset *qs = dev2qset(dev); | ||
1683 | struct sge_rspq *q = &qs->rspq; | 1676 | struct sge_rspq *q = &qs->rspq; |
1684 | int work_done, limit = min(*budget, dev->quota), avail = limit; | 1677 | struct adapter *adapter = qs->adap; |
1678 | int work_done = 0; | ||
1685 | 1679 | ||
1686 | while (avail) { | 1680 | while (work_done < budget) { |
1687 | struct sk_buff *head, *tail, *skbs[RX_BUNDLE_SIZE]; | 1681 | struct sk_buff *head, *tail, *skbs[RX_BUNDLE_SIZE]; |
1688 | int ngathered; | 1682 | int ngathered; |
1689 | 1683 | ||
1690 | spin_lock_irq(&q->lock); | 1684 | spin_lock_irq(&q->lock); |
1691 | head = q->rx_head; | 1685 | head = q->rx_head; |
1692 | if (!head) { | 1686 | if (!head) { |
1693 | work_done = limit - avail; | 1687 | napi_complete(napi); |
1694 | *budget -= work_done; | ||
1695 | dev->quota -= work_done; | ||
1696 | __netif_rx_complete(dev); | ||
1697 | spin_unlock_irq(&q->lock); | 1688 | spin_unlock_irq(&q->lock); |
1698 | return 0; | 1689 | return work_done; |
1699 | } | 1690 | } |
1700 | 1691 | ||
1701 | tail = q->rx_tail; | 1692 | tail = q->rx_tail; |
1702 | q->rx_head = q->rx_tail = NULL; | 1693 | q->rx_head = q->rx_tail = NULL; |
1703 | spin_unlock_irq(&q->lock); | 1694 | spin_unlock_irq(&q->lock); |
1704 | 1695 | ||
1705 | for (ngathered = 0; avail && head; avail--) { | 1696 | for (ngathered = 0; work_done < budget && head; work_done++) { |
1706 | prefetch(head->data); | 1697 | prefetch(head->data); |
1707 | skbs[ngathered] = head; | 1698 | skbs[ngathered] = head; |
1708 | head = head->next; | 1699 | head = head->next; |
@@ -1724,10 +1715,8 @@ static int ofld_poll(struct net_device *dev, int *budget) | |||
1724 | } | 1715 | } |
1725 | deliver_partial_bundle(&adapter->tdev, q, skbs, ngathered); | 1716 | deliver_partial_bundle(&adapter->tdev, q, skbs, ngathered); |
1726 | } | 1717 | } |
1727 | work_done = limit - avail; | 1718 | |
1728 | *budget -= work_done; | 1719 | return work_done; |
1729 | dev->quota -= work_done; | ||
1730 | return 1; | ||
1731 | } | 1720 | } |
1732 | 1721 | ||
1733 | /** | 1722 | /** |
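The converted ofld_poll above shows the new poll() contract: the handler receives its budget as a plain int, returns the number of packets it processed, and calls napi_complete() itself once it runs out of work; the old *budget and dev->quota bookkeeping, and the 0/1 return convention, fall away entirely. The canonical shape, as a sketch with hypothetical my_* helpers (my_rx_clean() is assumed to process at most budget packets):

    static int my_rx_clean(struct my_qset *qs, int budget);
    static void my_enable_rx_irq(struct my_qset *qs);

    static int my_poll(struct napi_struct *napi, int budget)
    {
        struct my_qset *qs = container_of(napi, struct my_qset, napi);
        int work_done = my_rx_clean(qs, budget);

        if (work_done < budget) {
            napi_complete(napi);      /* leave polled mode first... */
            my_enable_rx_irq(qs);     /* ...then re-arm the interrupt */
        }
        return work_done;             /* never more than budget */
    }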
@@ -2071,50 +2060,47 @@ static inline int is_pure_response(const struct rsp_desc *r) | |||
2071 | 2060 | ||
2072 | /** | 2061 | /** |
2073 | * napi_rx_handler - the NAPI handler for Rx processing | 2062 | * napi_rx_handler - the NAPI handler for Rx processing |
2074 | * @dev: the net device | 2063 | * @napi: the napi instance |
2075 | * @budget: how many packets we can process in this round | 2064 | * @budget: how many packets we can process in this round |
2076 | * | 2065 | * |
2077 | * Handler for new data events when using NAPI. | 2066 | * Handler for new data events when using NAPI. |
2078 | */ | 2067 | */ |
2079 | static int napi_rx_handler(struct net_device *dev, int *budget) | 2068 | static int napi_rx_handler(struct napi_struct *napi, int budget) |
2080 | { | 2069 | { |
2081 | const struct port_info *pi = netdev_priv(dev); | 2070 | struct sge_qset *qs = container_of(napi, struct sge_qset, napi); |
2082 | struct adapter *adap = pi->adapter; | 2071 | struct adapter *adap = qs->adap; |
2083 | struct sge_qset *qs = dev2qset(dev); | 2072 | int work_done = process_responses(adap, qs, budget); |
2084 | int effective_budget = min(*budget, dev->quota); | ||
2085 | |||
2086 | int work_done = process_responses(adap, qs, effective_budget); | ||
2087 | *budget -= work_done; | ||
2088 | dev->quota -= work_done; | ||
2089 | 2073 | ||
2090 | if (work_done >= effective_budget) | 2074 | if (likely(work_done < budget)) { |
2091 | return 1; | 2075 | napi_complete(napi); |
2092 | |||
2093 | netif_rx_complete(dev); | ||
2094 | 2076 | ||
2095 | /* | 2077 | /* |
2096 | * Because we don't atomically flush the following write it is | 2078 | * Because we don't atomically flush the following |
2097 | * possible that in very rare cases it can reach the device in a way | 2079 | * write it is possible that in very rare cases it can |
2098 | * that races with a new response being written plus an error interrupt | 2080 | * reach the device in a way that races with a new |
2099 | * causing the NAPI interrupt handler below to return unhandled status | 2081 | * response being written plus an error interrupt |
2100 | * to the OS. To protect against this would require flushing the write | 2082 | * causing the NAPI interrupt handler below to return |
2101 | * and doing both the write and the flush with interrupts off. Way too | 2083 | * unhandled status to the OS. To protect against |
2102 | * expensive and unjustifiable given the rarity of the race. | 2084 | * this would require flushing the write and doing |
2103 | * | 2085 | * both the write and the flush with interrupts off. |
2104 | * The race cannot happen at all with MSI-X. | 2086 | * Way too expensive and unjustifiable given the |
2105 | */ | 2087 | * rarity of the race. |
2106 | t3_write_reg(adap, A_SG_GTS, V_RSPQ(qs->rspq.cntxt_id) | | 2088 | * |
2107 | V_NEWTIMER(qs->rspq.next_holdoff) | | 2089 | * The race cannot happen at all with MSI-X. |
2108 | V_NEWINDEX(qs->rspq.cidx)); | 2090 | */ |
2109 | return 0; | 2091 | t3_write_reg(adap, A_SG_GTS, V_RSPQ(qs->rspq.cntxt_id) | |
2092 | V_NEWTIMER(qs->rspq.next_holdoff) | | ||
2093 | V_NEWINDEX(qs->rspq.cidx)); | ||
2094 | } | ||
2095 | return work_done; | ||
2110 | } | 2096 | } |
2111 | 2097 | ||
2112 | /* | 2098 | /* |
2113 | * Returns true if the device is already scheduled for polling. | 2099 | * Returns true if the device is already scheduled for polling. |
2114 | */ | 2100 | */ |
2115 | static inline int napi_is_scheduled(struct net_device *dev) | 2101 | static inline int napi_is_scheduled(struct napi_struct *napi) |
2116 | { | 2102 | { |
2117 | return test_bit(__LINK_STATE_RX_SCHED, &dev->state); | 2103 | return test_bit(NAPI_STATE_SCHED, &napi->state); |
2118 | } | 2104 | } |
2119 | 2105 | ||
2120 | /** | 2106 | /** |
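Note how every two-step __netif_rx_schedule_prep()/__netif_rx_schedule() sequence in sge.c collapses into a single napi_schedule() call: the new helper performs the atomic test-and-set internally, and napi_is_scheduled() correspondingly tests NAPI_STATE_SCHED in the napi_struct instead of __LINK_STATE_RX_SCHED in the net_device. Roughly, paraphrasing the core helper rather than cxgb3 code:

    static inline void napi_schedule(struct napi_struct *n)
    {
        if (napi_schedule_prep(n))    /* test-and-set NAPI_STATE_SCHED */
            __napi_schedule(n);       /* queue n, raise NET_RX_SOFTIRQ */
    }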
@@ -2197,8 +2183,7 @@ static inline int handle_responses(struct adapter *adap, struct sge_rspq *q) | |||
2197 | V_NEWTIMER(q->holdoff_tmr) | V_NEWINDEX(q->cidx)); | 2183 | V_NEWTIMER(q->holdoff_tmr) | V_NEWINDEX(q->cidx)); |
2198 | return 0; | 2184 | return 0; |
2199 | } | 2185 | } |
2200 | if (likely(__netif_rx_schedule_prep(qs->netdev))) | 2186 | napi_schedule(&qs->napi); |
2201 | __netif_rx_schedule(qs->netdev); | ||
2202 | return 1; | 2187 | return 1; |
2203 | } | 2188 | } |
2204 | 2189 | ||
@@ -2209,8 +2194,7 @@ static inline int handle_responses(struct adapter *adap, struct sge_rspq *q) | |||
2209 | irqreturn_t t3_sge_intr_msix(int irq, void *cookie) | 2194 | irqreturn_t t3_sge_intr_msix(int irq, void *cookie) |
2210 | { | 2195 | { |
2211 | struct sge_qset *qs = cookie; | 2196 | struct sge_qset *qs = cookie; |
2212 | const struct port_info *pi = netdev_priv(qs->netdev); | 2197 | struct adapter *adap = qs->adap; |
2213 | struct adapter *adap = pi->adapter; | ||
2214 | struct sge_rspq *q = &qs->rspq; | 2198 | struct sge_rspq *q = &qs->rspq; |
2215 | 2199 | ||
2216 | spin_lock(&q->lock); | 2200 | spin_lock(&q->lock); |
@@ -2229,13 +2213,11 @@ irqreturn_t t3_sge_intr_msix(int irq, void *cookie) | |||
2229 | irqreturn_t t3_sge_intr_msix_napi(int irq, void *cookie) | 2213 | irqreturn_t t3_sge_intr_msix_napi(int irq, void *cookie) |
2230 | { | 2214 | { |
2231 | struct sge_qset *qs = cookie; | 2215 | struct sge_qset *qs = cookie; |
2232 | const struct port_info *pi = netdev_priv(qs->netdev); | ||
2233 | struct adapter *adap = pi->adapter; | ||
2234 | struct sge_rspq *q = &qs->rspq; | 2216 | struct sge_rspq *q = &qs->rspq; |
2235 | 2217 | ||
2236 | spin_lock(&q->lock); | 2218 | spin_lock(&q->lock); |
2237 | 2219 | ||
2238 | if (handle_responses(adap, q) < 0) | 2220 | if (handle_responses(qs->adap, q) < 0) |
2239 | q->unhandled_irqs++; | 2221 | q->unhandled_irqs++; |
2240 | spin_unlock(&q->lock); | 2222 | spin_unlock(&q->lock); |
2241 | return IRQ_HANDLED; | 2223 | return IRQ_HANDLED; |
@@ -2278,11 +2260,13 @@ static irqreturn_t t3_intr_msi(int irq, void *cookie) | |||
2278 | return IRQ_HANDLED; | 2260 | return IRQ_HANDLED; |
2279 | } | 2261 | } |
2280 | 2262 | ||
2281 | static int rspq_check_napi(struct net_device *dev, struct sge_rspq *q) | 2263 | static int rspq_check_napi(struct sge_qset *qs) |
2282 | { | 2264 | { |
2283 | if (!napi_is_scheduled(dev) && is_new_response(&q->desc[q->cidx], q)) { | 2265 | struct sge_rspq *q = &qs->rspq; |
2284 | if (likely(__netif_rx_schedule_prep(dev))) | 2266 | |
2285 | __netif_rx_schedule(dev); | 2267 | if (!napi_is_scheduled(&qs->napi) && |
2268 | is_new_response(&q->desc[q->cidx], q)) { | ||
2269 | napi_schedule(&qs->napi); | ||
2286 | return 1; | 2270 | return 1; |
2287 | } | 2271 | } |
2288 | return 0; | 2272 | return 0; |
@@ -2303,10 +2287,9 @@ irqreturn_t t3_intr_msi_napi(int irq, void *cookie) | |||
2303 | 2287 | ||
2304 | spin_lock(&q->lock); | 2288 | spin_lock(&q->lock); |
2305 | 2289 | ||
2306 | new_packets = rspq_check_napi(adap->sge.qs[0].netdev, q); | 2290 | new_packets = rspq_check_napi(&adap->sge.qs[0]); |
2307 | if (adap->params.nports == 2) | 2291 | if (adap->params.nports == 2) |
2308 | new_packets += rspq_check_napi(adap->sge.qs[1].netdev, | 2292 | new_packets += rspq_check_napi(&adap->sge.qs[1]); |
2309 | &adap->sge.qs[1].rspq); | ||
2310 | if (!new_packets && t3_slow_intr_handler(adap) == 0) | 2293 | if (!new_packets && t3_slow_intr_handler(adap) == 0) |
2311 | q->unhandled_irqs++; | 2294 | q->unhandled_irqs++; |
2312 | 2295 | ||
@@ -2409,9 +2392,9 @@ static irqreturn_t t3b_intr(int irq, void *cookie) | |||
2409 | static irqreturn_t t3b_intr_napi(int irq, void *cookie) | 2392 | static irqreturn_t t3b_intr_napi(int irq, void *cookie) |
2410 | { | 2393 | { |
2411 | u32 map; | 2394 | u32 map; |
2412 | struct net_device *dev; | ||
2413 | struct adapter *adap = cookie; | 2395 | struct adapter *adap = cookie; |
2414 | struct sge_rspq *q0 = &adap->sge.qs[0].rspq; | 2396 | struct sge_qset *qs0 = &adap->sge.qs[0]; |
2397 | struct sge_rspq *q0 = &qs0->rspq; | ||
2415 | 2398 | ||
2416 | t3_write_reg(adap, A_PL_CLI, 0); | 2399 | t3_write_reg(adap, A_PL_CLI, 0); |
2417 | map = t3_read_reg(adap, A_SG_DATA_INTR); | 2400 | map = t3_read_reg(adap, A_SG_DATA_INTR); |
@@ -2424,18 +2407,11 @@ static irqreturn_t t3b_intr_napi(int irq, void *cookie) | |||
2424 | if (unlikely(map & F_ERRINTR)) | 2407 | if (unlikely(map & F_ERRINTR)) |
2425 | t3_slow_intr_handler(adap); | 2408 | t3_slow_intr_handler(adap); |
2426 | 2409 | ||
2427 | if (likely(map & 1)) { | 2410 | if (likely(map & 1)) |
2428 | dev = adap->sge.qs[0].netdev; | 2411 | napi_schedule(&qs0->napi); |
2429 | |||
2430 | if (likely(__netif_rx_schedule_prep(dev))) | ||
2431 | __netif_rx_schedule(dev); | ||
2432 | } | ||
2433 | if (map & 2) { | ||
2434 | dev = adap->sge.qs[1].netdev; | ||
2435 | 2412 | ||
2436 | if (likely(__netif_rx_schedule_prep(dev))) | 2413 | if (map & 2) |
2437 | __netif_rx_schedule(dev); | 2414 | napi_schedule(&adap->sge.qs[1].napi); |
2438 | } | ||
2439 | 2415 | ||
2440 | spin_unlock(&q0->lock); | 2416 | spin_unlock(&q0->lock); |
2441 | return IRQ_HANDLED; | 2417 | return IRQ_HANDLED; |
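With the poll context hanging off the queue set, the hard-IRQ handlers above shrink to ack-and-schedule; all heavier work moves into poll() running from softirq context. A per-vector MSI-X handler in the new style looks roughly like this (my_* names are illustrative, and interrupt masking is assumed to be handled by the device, as in cxgb3):

    static irqreturn_t my_msix_intr(int irq, void *cookie)
    {
        struct my_qset *qs = cookie;  /* one vector per queue set */

        napi_schedule(&qs->napi);     /* my_poll() runs from softirq */
        return IRQ_HANDLED;
    }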
@@ -2514,8 +2490,7 @@ static void sge_timer_cb(unsigned long data) | |||
2514 | { | 2490 | { |
2515 | spinlock_t *lock; | 2491 | spinlock_t *lock; |
2516 | struct sge_qset *qs = (struct sge_qset *)data; | 2492 | struct sge_qset *qs = (struct sge_qset *)data; |
2517 | const struct port_info *pi = netdev_priv(qs->netdev); | 2493 | struct adapter *adap = qs->adap; |
2518 | struct adapter *adap = pi->adapter; | ||
2519 | 2494 | ||
2520 | if (spin_trylock(&qs->txq[TXQ_ETH].lock)) { | 2495 | if (spin_trylock(&qs->txq[TXQ_ETH].lock)) { |
2521 | reclaim_completed_tx(adap, &qs->txq[TXQ_ETH]); | 2496 | reclaim_completed_tx(adap, &qs->txq[TXQ_ETH]); |
@@ -2526,9 +2501,9 @@ static void sge_timer_cb(unsigned long data) | |||
2526 | spin_unlock(&qs->txq[TXQ_OFLD].lock); | 2501 | spin_unlock(&qs->txq[TXQ_OFLD].lock); |
2527 | } | 2502 | } |
2528 | lock = (adap->flags & USING_MSIX) ? &qs->rspq.lock : | 2503 | lock = (adap->flags & USING_MSIX) ? &qs->rspq.lock : |
2529 | &adap->sge.qs[0].rspq.lock; | 2504 | &adap->sge.qs[0].rspq.lock; |
2530 | if (spin_trylock_irq(lock)) { | 2505 | if (spin_trylock_irq(lock)) { |
2531 | if (!napi_is_scheduled(qs->netdev)) { | 2506 | if (!napi_is_scheduled(&qs->napi)) { |
2532 | u32 status = t3_read_reg(adap, A_SG_RSPQ_FL_STATUS); | 2507 | u32 status = t3_read_reg(adap, A_SG_RSPQ_FL_STATUS); |
2533 | 2508 | ||
2534 | if (qs->fl[0].credits < qs->fl[0].size) | 2509 | if (qs->fl[0].credits < qs->fl[0].size) |
@@ -2562,12 +2537,9 @@ static void sge_timer_cb(unsigned long data) | |||
2562 | */ | 2537 | */ |
2563 | void t3_update_qset_coalesce(struct sge_qset *qs, const struct qset_params *p) | 2538 | void t3_update_qset_coalesce(struct sge_qset *qs, const struct qset_params *p) |
2564 | { | 2539 | { |
2565 | if (!qs->netdev) | ||
2566 | return; | ||
2567 | |||
2568 | qs->rspq.holdoff_tmr = max(p->coalesce_usecs * 10, 1U);/* can't be 0 */ | 2540 | qs->rspq.holdoff_tmr = max(p->coalesce_usecs * 10, 1U);/* can't be 0 */ |
2569 | qs->rspq.polling = p->polling; | 2541 | qs->rspq.polling = p->polling; |
2570 | qs->netdev->poll = p->polling ? napi_rx_handler : ofld_poll; | 2542 | qs->napi.poll = p->polling ? napi_rx_handler : ofld_poll; |
2571 | } | 2543 | } |
2572 | 2544 | ||
2573 | /** | 2545 | /** |
@@ -2587,7 +2559,7 @@ void t3_update_qset_coalesce(struct sge_qset *qs, const struct qset_params *p) | |||
2587 | */ | 2559 | */ |
2588 | int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports, | 2560 | int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports, |
2589 | int irq_vec_idx, const struct qset_params *p, | 2561 | int irq_vec_idx, const struct qset_params *p, |
2590 | int ntxq, struct net_device *netdev) | 2562 | int ntxq, struct net_device *dev) |
2591 | { | 2563 | { |
2592 | int i, ret = -ENOMEM; | 2564 | int i, ret = -ENOMEM; |
2593 | struct sge_qset *q = &adapter->sge.qs[id]; | 2565 | struct sge_qset *q = &adapter->sge.qs[id]; |
@@ -2708,16 +2680,10 @@ int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports, | |||
2708 | } | 2680 | } |
2709 | 2681 | ||
2710 | spin_unlock(&adapter->sge.reg_lock); | 2682 | spin_unlock(&adapter->sge.reg_lock); |
2711 | q->netdev = netdev; | ||
2712 | t3_update_qset_coalesce(q, p); | ||
2713 | 2683 | ||
2714 | /* | 2684 | q->adap = adapter; |
2715 | * We use atalk_ptr as a backpointer to a qset. In case a device is | 2685 | q->netdev = dev; |
2716 | * associated with multiple queue sets only the first one sets | 2686 | t3_update_qset_coalesce(q, p); |
2717 | * atalk_ptr. | ||
2718 | */ | ||
2719 | if (netdev->atalk_ptr == NULL) | ||
2720 | netdev->atalk_ptr = q; | ||
2721 | 2687 | ||
2722 | refill_fl(adapter, &q->fl[0], q->fl[0].size, GFP_KERNEL); | 2688 | refill_fl(adapter, &q->fl[0], q->fl[0].size, GFP_KERNEL); |
2723 | refill_fl(adapter, &q->fl[1], q->fl[1].size, GFP_KERNEL); | 2689 | refill_fl(adapter, &q->fl[1], q->fl[1].size, GFP_KERNEL); |
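Because poll() no longer receives a net_device, sge.c caches what the hot paths need directly in the queue set when it is allocated: q->adap replaces the netdev_priv(qs->netdev) detour, and the atalk_ptr back-pointer abuse is gone entirely. Condensed from the t3_sge_alloc_qset() hunk above (the NAPI instances themselves are registered separately, in init_napi() on the cxgb3_main.c side):

    /* inside t3_sge_alloc_qset(), once the queues are set up: */
    q->adap = adapter;                /* back-pointer for irq/poll/timer paths */
    q->netdev = dev;                  /* owning net_device, for the stack only */
    t3_update_qset_coalesce(q, p);    /* selects qs->napi.poll */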
diff --git a/drivers/net/e100.c b/drivers/net/e100.c index 280313b9b069..e25f5ec2b279 100644 --- a/drivers/net/e100.c +++ b/drivers/net/e100.c | |||
@@ -539,6 +539,7 @@ struct nic { | |||
539 | struct csr __iomem *csr; | 539 | struct csr __iomem *csr; |
540 | enum scb_cmd_lo cuc_cmd; | 540 | enum scb_cmd_lo cuc_cmd; |
541 | unsigned int cbs_avail; | 541 | unsigned int cbs_avail; |
542 | struct napi_struct napi; | ||
542 | struct cb *cbs; | 543 | struct cb *cbs; |
543 | struct cb *cb_to_use; | 544 | struct cb *cb_to_use; |
544 | struct cb *cb_to_send; | 545 | struct cb *cb_to_send; |
@@ -1974,35 +1975,31 @@ static irqreturn_t e100_intr(int irq, void *dev_id) | |||
1974 | if(stat_ack & stat_ack_rnr) | 1975 | if(stat_ack & stat_ack_rnr) |
1975 | nic->ru_running = RU_SUSPENDED; | 1976 | nic->ru_running = RU_SUSPENDED; |
1976 | 1977 | ||
1977 | if(likely(netif_rx_schedule_prep(netdev))) { | 1978 | if(likely(netif_rx_schedule_prep(netdev, &nic->napi))) { |
1978 | e100_disable_irq(nic); | 1979 | e100_disable_irq(nic); |
1979 | __netif_rx_schedule(netdev); | 1980 | __netif_rx_schedule(netdev, &nic->napi); |
1980 | } | 1981 | } |
1981 | 1982 | ||
1982 | return IRQ_HANDLED; | 1983 | return IRQ_HANDLED; |
1983 | } | 1984 | } |
1984 | 1985 | ||
1985 | static int e100_poll(struct net_device *netdev, int *budget) | 1986 | static int e100_poll(struct napi_struct *napi, int budget) |
1986 | { | 1987 | { |
1987 | struct nic *nic = netdev_priv(netdev); | 1988 | struct nic *nic = container_of(napi, struct nic, napi); |
1988 | unsigned int work_to_do = min(netdev->quota, *budget); | 1989 | struct net_device *netdev = nic->netdev; |
1989 | unsigned int work_done = 0; | 1990 | int work_done = 0; |
1990 | int tx_cleaned; | 1991 | int tx_cleaned; |
1991 | 1992 | ||
1992 | e100_rx_clean(nic, &work_done, work_to_do); | 1993 | e100_rx_clean(nic, &work_done, budget); |
1993 | tx_cleaned = e100_tx_clean(nic); | 1994 | tx_cleaned = e100_tx_clean(nic); |
1994 | 1995 | ||
1995 | /* If no Rx and Tx cleanup work was done, exit polling mode. */ | 1996 | /* If no Rx and Tx cleanup work was done, exit polling mode. */ |
1996 | if((!tx_cleaned && (work_done == 0)) || !netif_running(netdev)) { | 1997 | if((!tx_cleaned && (work_done == 0)) || !netif_running(netdev)) { |
1997 | netif_rx_complete(netdev); | 1998 | netif_rx_complete(netdev, napi); |
1998 | e100_enable_irq(nic); | 1999 | e100_enable_irq(nic); |
1999 | return 0; | ||
2000 | } | 2000 | } |
2001 | 2001 | ||
2002 | *budget -= work_done; | 2002 | return work_done; |
2003 | netdev->quota -= work_done; | ||
2004 | |||
2005 | return 1; | ||
2006 | } | 2003 | } |
2007 | 2004 | ||
2008 | #ifdef CONFIG_NET_POLL_CONTROLLER | 2005 | #ifdef CONFIG_NET_POLL_CONTROLLER |
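e100 keeps its disable-then-schedule ordering across the conversion: mask the device interrupt, then schedule, so that poll() can re-enable it exactly once on completion. Note the transitional helper signatures: at this point in the tree the netif_rx_* wrappers take both the net_device and the napi_struct. The correspondence with the napi_* forms that later kernels settle on, shown as comments since the later names are not part of this patch:

    /* transitional (this commit)            later kernels             */
    /* netif_rx_schedule_prep(dev, &napi) -> napi_schedule_prep(&napi) */
    /* __netif_rx_schedule(dev, &napi)    -> __napi_schedule(&napi)    */
    /* netif_rx_complete(dev, &napi)      -> napi_complete(&napi)      */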
@@ -2071,7 +2068,7 @@ static int e100_up(struct nic *nic) | |||
2071 | nic->netdev->name, nic->netdev))) | 2068 | nic->netdev->name, nic->netdev))) |
2072 | goto err_no_irq; | 2069 | goto err_no_irq; |
2073 | netif_wake_queue(nic->netdev); | 2070 | netif_wake_queue(nic->netdev); |
2074 | netif_poll_enable(nic->netdev); | 2071 | napi_enable(&nic->napi); |
2075 | /* enable ints _after_ enabling poll, preventing a race between | 2072 | /* enable ints _after_ enabling poll, preventing a race between |
2076 | * disable ints+schedule */ | 2073 | * disable ints+schedule */ |
2077 | e100_enable_irq(nic); | 2074 | e100_enable_irq(nic); |
@@ -2089,7 +2086,7 @@ err_rx_clean_list: | |||
2089 | static void e100_down(struct nic *nic) | 2086 | static void e100_down(struct nic *nic) |
2090 | { | 2087 | { |
2091 | /* wait here for poll to complete */ | 2088 | /* wait here for poll to complete */ |
2092 | netif_poll_disable(nic->netdev); | 2089 | napi_disable(&nic->napi); |
2093 | netif_stop_queue(nic->netdev); | 2090 | netif_stop_queue(nic->netdev); |
2094 | e100_hw_reset(nic); | 2091 | e100_hw_reset(nic); |
2095 | free_irq(nic->pdev->irq, nic->netdev); | 2092 | free_irq(nic->pdev->irq, nic->netdev); |
@@ -2572,14 +2569,13 @@ static int __devinit e100_probe(struct pci_dev *pdev, | |||
2572 | SET_ETHTOOL_OPS(netdev, &e100_ethtool_ops); | 2569 | SET_ETHTOOL_OPS(netdev, &e100_ethtool_ops); |
2573 | netdev->tx_timeout = e100_tx_timeout; | 2570 | netdev->tx_timeout = e100_tx_timeout; |
2574 | netdev->watchdog_timeo = E100_WATCHDOG_PERIOD; | 2571 | netdev->watchdog_timeo = E100_WATCHDOG_PERIOD; |
2575 | netdev->poll = e100_poll; | ||
2576 | netdev->weight = E100_NAPI_WEIGHT; | ||
2577 | #ifdef CONFIG_NET_POLL_CONTROLLER | 2572 | #ifdef CONFIG_NET_POLL_CONTROLLER |
2578 | netdev->poll_controller = e100_netpoll; | 2573 | netdev->poll_controller = e100_netpoll; |
2579 | #endif | 2574 | #endif |
2580 | strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1); | 2575 | strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1); |
2581 | 2576 | ||
2582 | nic = netdev_priv(netdev); | 2577 | nic = netdev_priv(netdev); |
2578 | netif_napi_add(netdev, &nic->napi, e100_poll, E100_NAPI_WEIGHT); | ||
2583 | nic->netdev = netdev; | 2579 | nic->netdev = netdev; |
2584 | nic->pdev = pdev; | 2580 | nic->pdev = pdev; |
2585 | nic->msg_enable = (1 << debug) - 1; | 2581 | nic->msg_enable = (1 << debug) - 1; |
@@ -2733,7 +2729,7 @@ static int e100_suspend(struct pci_dev *pdev, pm_message_t state) | |||
2733 | struct nic *nic = netdev_priv(netdev); | 2729 | struct nic *nic = netdev_priv(netdev); |
2734 | 2730 | ||
2735 | if (netif_running(netdev)) | 2731 | if (netif_running(netdev)) |
2736 | netif_poll_disable(nic->netdev); | 2732 | napi_disable(&nic->napi); |
2737 | del_timer_sync(&nic->watchdog); | 2733 | del_timer_sync(&nic->watchdog); |
2738 | netif_carrier_off(nic->netdev); | 2734 | netif_carrier_off(nic->netdev); |
2739 | netif_device_detach(netdev); | 2735 | netif_device_detach(netdev); |
@@ -2779,7 +2775,7 @@ static void e100_shutdown(struct pci_dev *pdev) | |||
2779 | struct nic *nic = netdev_priv(netdev); | 2775 | struct nic *nic = netdev_priv(netdev); |
2780 | 2776 | ||
2781 | if (netif_running(netdev)) | 2777 | if (netif_running(netdev)) |
2782 | netif_poll_disable(nic->netdev); | 2778 | napi_disable(&nic->napi); |
2783 | del_timer_sync(&nic->watchdog); | 2779 | del_timer_sync(&nic->watchdog); |
2784 | netif_carrier_off(nic->netdev); | 2780 | netif_carrier_off(nic->netdev); |
2785 | 2781 | ||
@@ -2804,12 +2800,13 @@ static void e100_shutdown(struct pci_dev *pdev) | |||
2804 | static pci_ers_result_t e100_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state) | 2800 | static pci_ers_result_t e100_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state) |
2805 | { | 2801 | { |
2806 | struct net_device *netdev = pci_get_drvdata(pdev); | 2802 | struct net_device *netdev = pci_get_drvdata(pdev); |
2803 | struct nic *nic = netdev_priv(netdev); | ||
2807 | 2804 | ||
2808 | /* Similar to calling e100_down(), but avoids adapter I/O. */ | 2805 | /* Similar to calling e100_down(), but avoids adapter I/O. */ |
2809 | netdev->stop(netdev); | 2806 | netdev->stop(netdev); |
2810 | 2807 | ||
2811 | /* Detach; put netif into state similar to hotplug unplug. */ | 2808 | /* Detach; put netif into state similar to hotplug unplug. */ |
2812 | netif_poll_enable(netdev); | 2809 | napi_enable(&nic->napi); |
2813 | netif_device_detach(netdev); | 2810 | netif_device_detach(netdev); |
2814 | pci_disable_device(pdev); | 2811 | pci_disable_device(pdev); |
2815 | 2812 | ||
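The e100 lifecycle hunks show the napi_enable()/napi_disable() half of the API. The "enable ints _after_ enabling poll" comment still applies unchanged, and napi_disable() additionally blocks until any in-flight poll has finished, which is what makes the teardown ordering in e100_down() safe. The general shape, sketched for a hypothetical my_* driver:

    static int my_up(struct my_nic *nic)
    {
        napi_enable(&nic->napi);      /* before IRQs, so scheduling works */
        my_enable_irq(nic);
        return 0;
    }

    static void my_down(struct my_nic *nic)
    {
        napi_disable(&nic->napi);     /* waits out a running poll */
        my_disable_irq(nic);
        /* rings may now be freed: no poll can still be executing */
    }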
diff --git a/drivers/net/e1000/e1000.h b/drivers/net/e1000/e1000.h index 16a6edfeba41..781ed9968489 100644 --- a/drivers/net/e1000/e1000.h +++ b/drivers/net/e1000/e1000.h | |||
@@ -300,6 +300,7 @@ struct e1000_adapter { | |||
300 | int cleaned_count); | 300 | int cleaned_count); |
301 | struct e1000_rx_ring *rx_ring; /* One per active queue */ | 301 | struct e1000_rx_ring *rx_ring; /* One per active queue */ |
302 | #ifdef CONFIG_E1000_NAPI | 302 | #ifdef CONFIG_E1000_NAPI |
303 | struct napi_struct napi; | ||
303 | struct net_device *polling_netdev; /* One per active queue */ | 304 | struct net_device *polling_netdev; /* One per active queue */ |
304 | #endif | 305 | #endif |
305 | int num_tx_queues; | 306 | int num_tx_queues; |
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c index e7c8951f47fa..723568d6e44a 100644 --- a/drivers/net/e1000/e1000_main.c +++ b/drivers/net/e1000/e1000_main.c | |||
@@ -166,7 +166,7 @@ static irqreturn_t e1000_intr_msi(int irq, void *data); | |||
166 | static boolean_t e1000_clean_tx_irq(struct e1000_adapter *adapter, | 166 | static boolean_t e1000_clean_tx_irq(struct e1000_adapter *adapter, |
167 | struct e1000_tx_ring *tx_ring); | 167 | struct e1000_tx_ring *tx_ring); |
168 | #ifdef CONFIG_E1000_NAPI | 168 | #ifdef CONFIG_E1000_NAPI |
169 | static int e1000_clean(struct net_device *poll_dev, int *budget); | 169 | static int e1000_clean(struct napi_struct *napi, int budget); |
170 | static boolean_t e1000_clean_rx_irq(struct e1000_adapter *adapter, | 170 | static boolean_t e1000_clean_rx_irq(struct e1000_adapter *adapter, |
171 | struct e1000_rx_ring *rx_ring, | 171 | struct e1000_rx_ring *rx_ring, |
172 | int *work_done, int work_to_do); | 172 | int *work_done, int work_to_do); |
@@ -545,7 +545,7 @@ int e1000_up(struct e1000_adapter *adapter) | |||
545 | clear_bit(__E1000_DOWN, &adapter->flags); | 545 | clear_bit(__E1000_DOWN, &adapter->flags); |
546 | 546 | ||
547 | #ifdef CONFIG_E1000_NAPI | 547 | #ifdef CONFIG_E1000_NAPI |
548 | netif_poll_enable(adapter->netdev); | 548 | napi_enable(&adapter->napi); |
549 | #endif | 549 | #endif |
550 | e1000_irq_enable(adapter); | 550 | e1000_irq_enable(adapter); |
551 | 551 | ||
@@ -634,7 +634,7 @@ e1000_down(struct e1000_adapter *adapter) | |||
634 | set_bit(__E1000_DOWN, &adapter->flags); | 634 | set_bit(__E1000_DOWN, &adapter->flags); |
635 | 635 | ||
636 | #ifdef CONFIG_E1000_NAPI | 636 | #ifdef CONFIG_E1000_NAPI |
637 | netif_poll_disable(netdev); | 637 | napi_disable(&adapter->napi); |
638 | #endif | 638 | #endif |
639 | e1000_irq_disable(adapter); | 639 | e1000_irq_disable(adapter); |
640 | 640 | ||
@@ -936,8 +936,7 @@ e1000_probe(struct pci_dev *pdev, | |||
936 | netdev->tx_timeout = &e1000_tx_timeout; | 936 | netdev->tx_timeout = &e1000_tx_timeout; |
937 | netdev->watchdog_timeo = 5 * HZ; | 937 | netdev->watchdog_timeo = 5 * HZ; |
938 | #ifdef CONFIG_E1000_NAPI | 938 | #ifdef CONFIG_E1000_NAPI |
939 | netdev->poll = &e1000_clean; | 939 | netif_napi_add(netdev, &adapter->napi, e1000_clean, 64); |
940 | netdev->weight = 64; | ||
941 | #endif | 940 | #endif |
942 | netdev->vlan_rx_register = e1000_vlan_rx_register; | 941 | netdev->vlan_rx_register = e1000_vlan_rx_register; |
943 | netdev->vlan_rx_add_vid = e1000_vlan_rx_add_vid; | 942 | netdev->vlan_rx_add_vid = e1000_vlan_rx_add_vid; |
@@ -1151,9 +1150,6 @@ e1000_probe(struct pci_dev *pdev, | |||
1151 | /* tell the stack to leave us alone until e1000_open() is called */ | 1150 | /* tell the stack to leave us alone until e1000_open() is called */ |
1152 | netif_carrier_off(netdev); | 1151 | netif_carrier_off(netdev); |
1153 | netif_stop_queue(netdev); | 1152 | netif_stop_queue(netdev); |
1154 | #ifdef CONFIG_E1000_NAPI | ||
1155 | netif_poll_disable(netdev); | ||
1156 | #endif | ||
1157 | 1153 | ||
1158 | strcpy(netdev->name, "eth%d"); | 1154 | strcpy(netdev->name, "eth%d"); |
1159 | if ((err = register_netdev(netdev))) | 1155 | if ((err = register_netdev(netdev))) |
@@ -1222,12 +1218,13 @@ e1000_remove(struct pci_dev *pdev) | |||
1222 | * would have already happened in close and is redundant. */ | 1218 | * would have already happened in close and is redundant. */ |
1223 | e1000_release_hw_control(adapter); | 1219 | e1000_release_hw_control(adapter); |
1224 | 1220 | ||
1225 | unregister_netdev(netdev); | ||
1226 | #ifdef CONFIG_E1000_NAPI | 1221 | #ifdef CONFIG_E1000_NAPI |
1227 | for (i = 0; i < adapter->num_rx_queues; i++) | 1222 | for (i = 0; i < adapter->num_rx_queues; i++) |
1228 | dev_put(&adapter->polling_netdev[i]); | 1223 | dev_put(&adapter->polling_netdev[i]); |
1229 | #endif | 1224 | #endif |
1230 | 1225 | ||
1226 | unregister_netdev(netdev); | ||
1227 | |||
1231 | if (!e1000_check_phy_reset_block(&adapter->hw)) | 1228 | if (!e1000_check_phy_reset_block(&adapter->hw)) |
1232 | e1000_phy_hw_reset(&adapter->hw); | 1229 | e1000_phy_hw_reset(&adapter->hw); |
1233 | 1230 | ||
@@ -1325,8 +1322,6 @@ e1000_sw_init(struct e1000_adapter *adapter) | |||
1325 | #ifdef CONFIG_E1000_NAPI | 1322 | #ifdef CONFIG_E1000_NAPI |
1326 | for (i = 0; i < adapter->num_rx_queues; i++) { | 1323 | for (i = 0; i < adapter->num_rx_queues; i++) { |
1327 | adapter->polling_netdev[i].priv = adapter; | 1324 | adapter->polling_netdev[i].priv = adapter; |
1328 | adapter->polling_netdev[i].poll = &e1000_clean; | ||
1329 | adapter->polling_netdev[i].weight = 64; | ||
1330 | dev_hold(&adapter->polling_netdev[i]); | 1325 | dev_hold(&adapter->polling_netdev[i]); |
1331 | set_bit(__LINK_STATE_START, &adapter->polling_netdev[i].state); | 1326 | set_bit(__LINK_STATE_START, &adapter->polling_netdev[i].state); |
1332 | } | 1327 | } |
@@ -1443,7 +1438,7 @@ e1000_open(struct net_device *netdev) | |||
1443 | clear_bit(__E1000_DOWN, &adapter->flags); | 1438 | clear_bit(__E1000_DOWN, &adapter->flags); |
1444 | 1439 | ||
1445 | #ifdef CONFIG_E1000_NAPI | 1440 | #ifdef CONFIG_E1000_NAPI |
1446 | netif_poll_enable(netdev); | 1441 | napi_enable(&adapter->napi); |
1447 | #endif | 1442 | #endif |
1448 | 1443 | ||
1449 | e1000_irq_enable(adapter); | 1444 | e1000_irq_enable(adapter); |
@@ -3786,12 +3781,12 @@ e1000_intr_msi(int irq, void *data) | |||
3786 | } | 3781 | } |
3787 | 3782 | ||
3788 | #ifdef CONFIG_E1000_NAPI | 3783 | #ifdef CONFIG_E1000_NAPI |
3789 | if (likely(netif_rx_schedule_prep(netdev))) { | 3784 | if (likely(netif_rx_schedule_prep(netdev, &adapter->napi))) { |
3790 | adapter->total_tx_bytes = 0; | 3785 | adapter->total_tx_bytes = 0; |
3791 | adapter->total_tx_packets = 0; | 3786 | adapter->total_tx_packets = 0; |
3792 | adapter->total_rx_bytes = 0; | 3787 | adapter->total_rx_bytes = 0; |
3793 | adapter->total_rx_packets = 0; | 3788 | adapter->total_rx_packets = 0; |
3794 | __netif_rx_schedule(netdev); | 3789 | __netif_rx_schedule(netdev, &adapter->napi); |
3795 | } else | 3790 | } else |
3796 | e1000_irq_enable(adapter); | 3791 | e1000_irq_enable(adapter); |
3797 | #else | 3792 | #else |
@@ -3871,12 +3866,12 @@ e1000_intr(int irq, void *data) | |||
3871 | E1000_WRITE_REG(hw, IMC, ~0); | 3866 | E1000_WRITE_REG(hw, IMC, ~0); |
3872 | E1000_WRITE_FLUSH(hw); | 3867 | E1000_WRITE_FLUSH(hw); |
3873 | } | 3868 | } |
3874 | if (likely(netif_rx_schedule_prep(netdev))) { | 3869 | if (likely(netif_rx_schedule_prep(netdev, &adapter->napi))) { |
3875 | adapter->total_tx_bytes = 0; | 3870 | adapter->total_tx_bytes = 0; |
3876 | adapter->total_tx_packets = 0; | 3871 | adapter->total_tx_packets = 0; |
3877 | adapter->total_rx_bytes = 0; | 3872 | adapter->total_rx_bytes = 0; |
3878 | adapter->total_rx_packets = 0; | 3873 | adapter->total_rx_packets = 0; |
3879 | __netif_rx_schedule(netdev); | 3874 | __netif_rx_schedule(netdev, &adapter->napi); |
3880 | } else | 3875 | } else |
3881 | /* this really should not happen! if it does it is basically a | 3876 | /* this really should not happen! if it does it is basically a |
3882 | * bug, but not a hard error, so enable ints and continue */ | 3877 | * bug, but not a hard error, so enable ints and continue */ |
@@ -3924,10 +3919,10 @@ e1000_intr(int irq, void *data) | |||
3924 | **/ | 3919 | **/ |
3925 | 3920 | ||
3926 | static int | 3921 | static int |
3927 | e1000_clean(struct net_device *poll_dev, int *budget) | 3922 | e1000_clean(struct napi_struct *napi, int budget) |
3928 | { | 3923 | { |
3929 | struct e1000_adapter *adapter; | 3924 | struct e1000_adapter *adapter = container_of(napi, struct e1000_adapter, napi); |
3930 | int work_to_do = min(*budget, poll_dev->quota); | 3925 | struct net_device *poll_dev = adapter->netdev; |
3931 | int tx_cleaned = 0, work_done = 0; | 3926 | int tx_cleaned = 0, work_done = 0; |
3932 | 3927 | ||
3933 | /* Must NOT use netdev_priv macro here. */ | 3928 | /* Must NOT use netdev_priv macro here. */ |
@@ -3948,23 +3943,19 @@ e1000_clean(struct net_device *poll_dev, int *budget) | |||
3948 | } | 3943 | } |
3949 | 3944 | ||
3950 | adapter->clean_rx(adapter, &adapter->rx_ring[0], | 3945 | adapter->clean_rx(adapter, &adapter->rx_ring[0], |
3951 | &work_done, work_to_do); | 3946 | &work_done, budget); |
3952 | |||
3953 | *budget -= work_done; | ||
3954 | poll_dev->quota -= work_done; | ||
3955 | 3947 | ||
3956 | /* If no Tx and not enough Rx work done, exit the polling mode */ | 3948 | /* If no Tx and not enough Rx work done, exit the polling mode */ |
3957 | if ((!tx_cleaned && (work_done == 0)) || | 3949 | if ((!tx_cleaned && (work_done < budget)) || |
3958 | !netif_running(poll_dev)) { | 3950 | !netif_running(poll_dev)) { |
3959 | quit_polling: | 3951 | quit_polling: |
3960 | if (likely(adapter->itr_setting & 3)) | 3952 | if (likely(adapter->itr_setting & 3)) |
3961 | e1000_set_itr(adapter); | 3953 | e1000_set_itr(adapter); |
3962 | netif_rx_complete(poll_dev); | 3954 | netif_rx_complete(poll_dev, napi); |
3963 | e1000_irq_enable(adapter); | 3955 | e1000_irq_enable(adapter); |
3964 | return 0; | ||
3965 | } | 3956 | } |
3966 | 3957 | ||
3967 | return 1; | 3958 | return work_done; |
3968 | } | 3959 | } |
3969 | 3960 | ||
3970 | #endif | 3961 | #endif |
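Two details of the e1000 conversion are worth calling out. First, the adapter is recovered with container_of() on the embedded napi_struct, and the per-queue polling_netdev scaffolding loses its ->poll and ->weight hookup. Second, the exit test changes meaning: the old handler stayed in polled mode unless zero rx work was done, while the new one completes whenever a round comes in under budget, matching the convention that returning work_done < budget implies the instance has completed itself. The two conditions side by side, as they appear in the hunk above:

    /* old: exit polling only when completely idle */
    if ((!tx_cleaned && (work_done == 0)) || !netif_running(poll_dev))

    /* new: exit whenever rx came in under budget */
    if ((!tx_cleaned && (work_done < budget)) || !netif_running(poll_dev))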
diff --git a/drivers/net/ehea/ehea.h b/drivers/net/ehea/ehea.h index 8d58be56f4e3..a154681165b9 100644 --- a/drivers/net/ehea/ehea.h +++ b/drivers/net/ehea/ehea.h | |||
@@ -351,6 +351,7 @@ struct ehea_q_skb_arr { | |||
351 | * Port resources | 351 | * Port resources |
352 | */ | 352 | */ |
353 | struct ehea_port_res { | 353 | struct ehea_port_res { |
354 | struct napi_struct napi; | ||
354 | struct port_stats p_stats; | 355 | struct port_stats p_stats; |
355 | struct ehea_mr send_mr; /* send memory region */ | 356 | struct ehea_mr send_mr; /* send memory region */ |
356 | struct ehea_mr recv_mr; /* receive memory region */ | 357 | struct ehea_mr recv_mr; /* receive memory region */ |
@@ -362,7 +363,6 @@ struct ehea_port_res { | |||
362 | struct ehea_cq *send_cq; | 363 | struct ehea_cq *send_cq; |
363 | struct ehea_cq *recv_cq; | 364 | struct ehea_cq *recv_cq; |
364 | struct ehea_eq *eq; | 365 | struct ehea_eq *eq; |
365 | struct net_device *d_netdev; | ||
366 | struct ehea_q_skb_arr rq1_skba; | 366 | struct ehea_q_skb_arr rq1_skba; |
367 | struct ehea_q_skb_arr rq2_skba; | 367 | struct ehea_q_skb_arr rq2_skba; |
368 | struct ehea_q_skb_arr rq3_skba; | 368 | struct ehea_q_skb_arr rq3_skba; |
diff --git a/drivers/net/ehea/ehea_main.c b/drivers/net/ehea/ehea_main.c index 717b12984d10..5ebd545ab04e 100644 --- a/drivers/net/ehea/ehea_main.c +++ b/drivers/net/ehea/ehea_main.c | |||
@@ -393,9 +393,9 @@ static int ehea_treat_poll_error(struct ehea_port_res *pr, int rq, | |||
393 | return 0; | 393 | return 0; |
394 | } | 394 | } |
395 | 395 | ||
396 | static struct ehea_cqe *ehea_proc_rwqes(struct net_device *dev, | 396 | static int ehea_proc_rwqes(struct net_device *dev, |
397 | struct ehea_port_res *pr, | 397 | struct ehea_port_res *pr, |
398 | int *budget) | 398 | int budget) |
399 | { | 399 | { |
400 | struct ehea_port *port = pr->port; | 400 | struct ehea_port *port = pr->port; |
401 | struct ehea_qp *qp = pr->qp; | 401 | struct ehea_qp *qp = pr->qp; |
@@ -408,18 +408,16 @@ static struct ehea_cqe *ehea_proc_rwqes(struct net_device *dev, | |||
408 | int skb_arr_rq2_len = pr->rq2_skba.len; | 408 | int skb_arr_rq2_len = pr->rq2_skba.len; |
409 | int skb_arr_rq3_len = pr->rq3_skba.len; | 409 | int skb_arr_rq3_len = pr->rq3_skba.len; |
410 | int processed, processed_rq1, processed_rq2, processed_rq3; | 410 | int processed, processed_rq1, processed_rq2, processed_rq3; |
411 | int wqe_index, last_wqe_index, rq, my_quota, port_reset; | 411 | int wqe_index, last_wqe_index, rq, port_reset; |
412 | 412 | ||
413 | processed = processed_rq1 = processed_rq2 = processed_rq3 = 0; | 413 | processed = processed_rq1 = processed_rq2 = processed_rq3 = 0; |
414 | last_wqe_index = 0; | 414 | last_wqe_index = 0; |
415 | my_quota = min(*budget, dev->quota); | ||
416 | 415 | ||
417 | cqe = ehea_poll_rq1(qp, &wqe_index); | 416 | cqe = ehea_poll_rq1(qp, &wqe_index); |
418 | while ((my_quota > 0) && cqe) { | 417 | while ((processed < budget) && cqe) { |
419 | ehea_inc_rq1(qp); | 418 | ehea_inc_rq1(qp); |
420 | processed_rq1++; | 419 | processed_rq1++; |
421 | processed++; | 420 | processed++; |
422 | my_quota--; | ||
423 | if (netif_msg_rx_status(port)) | 421 | if (netif_msg_rx_status(port)) |
424 | ehea_dump(cqe, sizeof(*cqe), "CQE"); | 422 | ehea_dump(cqe, sizeof(*cqe), "CQE"); |
425 | 423 | ||
@@ -434,14 +432,14 @@ static struct ehea_cqe *ehea_proc_rwqes(struct net_device *dev, | |||
434 | if (netif_msg_rx_err(port)) | 432 | if (netif_msg_rx_err(port)) |
435 | ehea_error("LL rq1: skb=NULL"); | 433 | ehea_error("LL rq1: skb=NULL"); |
436 | 434 | ||
437 | skb = netdev_alloc_skb(port->netdev, | 435 | skb = netdev_alloc_skb(dev, |
438 | EHEA_L_PKT_SIZE); | 436 | EHEA_L_PKT_SIZE); |
439 | if (!skb) | 437 | if (!skb) |
440 | break; | 438 | break; |
441 | } | 439 | } |
442 | skb_copy_to_linear_data(skb, ((char*)cqe) + 64, | 440 | skb_copy_to_linear_data(skb, ((char*)cqe) + 64, |
443 | cqe->num_bytes_transfered - 4); | 441 | cqe->num_bytes_transfered - 4); |
444 | ehea_fill_skb(port->netdev, skb, cqe); | 442 | ehea_fill_skb(dev, skb, cqe); |
445 | } else if (rq == 2) { /* RQ2 */ | 443 | } else if (rq == 2) { /* RQ2 */ |
446 | skb = get_skb_by_index(skb_arr_rq2, | 444 | skb = get_skb_by_index(skb_arr_rq2, |
447 | skb_arr_rq2_len, cqe); | 445 | skb_arr_rq2_len, cqe); |
@@ -450,7 +448,7 @@ static struct ehea_cqe *ehea_proc_rwqes(struct net_device *dev, | |||
450 | ehea_error("rq2: skb=NULL"); | 448 | ehea_error("rq2: skb=NULL"); |
451 | break; | 449 | break; |
452 | } | 450 | } |
453 | ehea_fill_skb(port->netdev, skb, cqe); | 451 | ehea_fill_skb(dev, skb, cqe); |
454 | processed_rq2++; | 452 | processed_rq2++; |
455 | } else { /* RQ3 */ | 453 | } else { /* RQ3 */ |
456 | skb = get_skb_by_index(skb_arr_rq3, | 454 | skb = get_skb_by_index(skb_arr_rq3, |
@@ -460,7 +458,7 @@ static struct ehea_cqe *ehea_proc_rwqes(struct net_device *dev, | |||
460 | ehea_error("rq3: skb=NULL"); | 458 | ehea_error("rq3: skb=NULL"); |
461 | break; | 459 | break; |
462 | } | 460 | } |
463 | ehea_fill_skb(port->netdev, skb, cqe); | 461 | ehea_fill_skb(dev, skb, cqe); |
464 | processed_rq3++; | 462 | processed_rq3++; |
465 | } | 463 | } |
466 | 464 | ||
@@ -471,7 +469,7 @@ static struct ehea_cqe *ehea_proc_rwqes(struct net_device *dev, | |||
471 | else | 469 | else |
472 | netif_receive_skb(skb); | 470 | netif_receive_skb(skb); |
473 | 471 | ||
474 | port->netdev->last_rx = jiffies; | 472 | dev->last_rx = jiffies; |
475 | } else { | 473 | } else { |
476 | pr->p_stats.poll_receive_errors++; | 474 | pr->p_stats.poll_receive_errors++; |
477 | port_reset = ehea_treat_poll_error(pr, rq, cqe, | 475 | port_reset = ehea_treat_poll_error(pr, rq, cqe, |
@@ -484,14 +482,12 @@ static struct ehea_cqe *ehea_proc_rwqes(struct net_device *dev, | |||
484 | } | 482 | } |
485 | 483 | ||
486 | pr->rx_packets += processed; | 484 | pr->rx_packets += processed; |
487 | *budget -= processed; | ||
488 | 485 | ||
489 | ehea_refill_rq1(pr, last_wqe_index, processed_rq1); | 486 | ehea_refill_rq1(pr, last_wqe_index, processed_rq1); |
490 | ehea_refill_rq2(pr, processed_rq2); | 487 | ehea_refill_rq2(pr, processed_rq2); |
491 | ehea_refill_rq3(pr, processed_rq3); | 488 | ehea_refill_rq3(pr, processed_rq3); |
492 | 489 | ||
493 | cqe = ehea_poll_rq1(qp, &wqe_index); | 490 | return processed; |
494 | return cqe; | ||
495 | } | 491 | } |
496 | 492 | ||
497 | static struct ehea_cqe *ehea_proc_cqes(struct ehea_port_res *pr, int my_quota) | 493 | static struct ehea_cqe *ehea_proc_cqes(struct ehea_port_res *pr, int my_quota) |
@@ -554,22 +550,27 @@ static struct ehea_cqe *ehea_proc_cqes(struct ehea_port_res *pr, int my_quota) | |||
554 | } | 550 | } |
555 | 551 | ||
556 | #define EHEA_NAPI_POLL_NUM_BEFORE_IRQ 16 | 552 | #define EHEA_NAPI_POLL_NUM_BEFORE_IRQ 16 |
553 | #define EHEA_POLL_MAX_CQES 65535 | ||
557 | 554 | ||
558 | static int ehea_poll(struct net_device *dev, int *budget) | 555 | static int ehea_poll(struct napi_struct *napi, int budget) |
559 | { | 556 | { |
560 | struct ehea_port_res *pr = dev->priv; | 557 | struct ehea_port_res *pr = container_of(napi, struct ehea_port_res, napi); |
558 | struct net_device *dev = pr->port->netdev; | ||
561 | struct ehea_cqe *cqe; | 559 | struct ehea_cqe *cqe; |
562 | struct ehea_cqe *cqe_skb = NULL; | 560 | struct ehea_cqe *cqe_skb = NULL; |
563 | int force_irq, wqe_index; | 561 | int force_irq, wqe_index; |
564 | 562 | int rx = 0; | |
565 | cqe = ehea_poll_rq1(pr->qp, &wqe_index); | ||
566 | cqe_skb = ehea_poll_cq(pr->send_cq); | ||
567 | 563 | ||
568 | force_irq = (pr->poll_counter > EHEA_NAPI_POLL_NUM_BEFORE_IRQ); | 564 | force_irq = (pr->poll_counter > EHEA_NAPI_POLL_NUM_BEFORE_IRQ); |
565 | cqe_skb = ehea_proc_cqes(pr, EHEA_POLL_MAX_CQES); | ||
566 | |||
567 | if (!force_irq) | ||
568 | rx += ehea_proc_rwqes(dev, pr, budget - rx); | ||
569 | 569 | ||
570 | if ((!cqe && !cqe_skb) || force_irq) { | 570 | while ((rx != budget) || force_irq) { |
571 | pr->poll_counter = 0; | 571 | pr->poll_counter = 0; |
572 | netif_rx_complete(dev); | 572 | force_irq = 0; |
573 | netif_rx_complete(dev, napi); | ||
573 | ehea_reset_cq_ep(pr->recv_cq); | 574 | ehea_reset_cq_ep(pr->recv_cq); |
574 | ehea_reset_cq_ep(pr->send_cq); | 575 | ehea_reset_cq_ep(pr->send_cq); |
575 | ehea_reset_cq_n1(pr->recv_cq); | 576 | ehea_reset_cq_n1(pr->recv_cq); |
@@ -578,43 +579,35 @@ static int ehea_poll(struct net_device *dev, int *budget) | |||
578 | cqe_skb = ehea_poll_cq(pr->send_cq); | 579 | cqe_skb = ehea_poll_cq(pr->send_cq); |
579 | 580 | ||
580 | if (!cqe && !cqe_skb) | 581 | if (!cqe && !cqe_skb) |
581 | return 0; | 582 | return rx; |
582 | 583 | ||
583 | if (!netif_rx_reschedule(dev, dev->quota)) | 584 | if (!netif_rx_reschedule(dev, napi)) |
584 | return 0; | 585 | return rx; |
585 | } | ||
586 | |||
587 | cqe = ehea_proc_rwqes(dev, pr, budget); | ||
588 | cqe_skb = ehea_proc_cqes(pr, 300); | ||
589 | 586 | ||
590 | if (cqe || cqe_skb) | 587 | cqe_skb = ehea_proc_cqes(pr, EHEA_POLL_MAX_CQES); |
591 | pr->poll_counter++; | 588 | rx += ehea_proc_rwqes(dev, pr, budget - rx); |
589 | } | ||
592 | 590 | ||
593 | return 1; | 591 | pr->poll_counter++; |
592 | return rx; | ||
594 | } | 593 | } |
595 | 594 | ||
596 | #ifdef CONFIG_NET_POLL_CONTROLLER | 595 | #ifdef CONFIG_NET_POLL_CONTROLLER |
597 | static void ehea_netpoll(struct net_device *dev) | 596 | static void ehea_netpoll(struct net_device *dev) |
598 | { | 597 | { |
599 | struct ehea_port *port = netdev_priv(dev); | 598 | struct ehea_port *port = netdev_priv(dev); |
599 | int i; | ||
600 | 600 | ||
601 | netif_rx_schedule(port->port_res[0].d_netdev); | 601 | for (i = 0; i < port->num_def_qps; i++) |
602 | netif_rx_schedule(dev, &port->port_res[i].napi); | ||
602 | } | 603 | } |
603 | #endif | 604 | #endif |
604 | 605 | ||
605 | static int ehea_poll_firstqueue(struct net_device *dev, int *budget) | ||
606 | { | ||
607 | struct ehea_port *port = netdev_priv(dev); | ||
608 | struct net_device *d_dev = port->port_res[0].d_netdev; | ||
609 | |||
610 | return ehea_poll(d_dev, budget); | ||
611 | } | ||
612 | |||
613 | static irqreturn_t ehea_recv_irq_handler(int irq, void *param) | 606 | static irqreturn_t ehea_recv_irq_handler(int irq, void *param) |
614 | { | 607 | { |
615 | struct ehea_port_res *pr = param; | 608 | struct ehea_port_res *pr = param; |
616 | 609 | ||
617 | netif_rx_schedule(pr->d_netdev); | 610 | netif_rx_schedule(pr->port->netdev, &pr->napi); |
618 | 611 | ||
619 | return IRQ_HANDLED; | 612 | return IRQ_HANDLED; |
620 | } | 613 | } |
@@ -1236,14 +1229,7 @@ static int ehea_init_port_res(struct ehea_port *port, struct ehea_port_res *pr, | |||
1236 | 1229 | ||
1237 | kfree(init_attr); | 1230 | kfree(init_attr); |
1238 | 1231 | ||
1239 | pr->d_netdev = alloc_netdev(0, "", ether_setup); | 1232 | netif_napi_add(pr->port->netdev, &pr->napi, ehea_poll, 64); |
1240 | if (!pr->d_netdev) | ||
1241 | goto out_free; | ||
1242 | pr->d_netdev->priv = pr; | ||
1243 | pr->d_netdev->weight = 64; | ||
1244 | pr->d_netdev->poll = ehea_poll; | ||
1245 | set_bit(__LINK_STATE_START, &pr->d_netdev->state); | ||
1246 | strcpy(pr->d_netdev->name, port->netdev->name); | ||
1247 | 1233 | ||
1248 | ret = 0; | 1234 | ret = 0; |
1249 | goto out; | 1235 | goto out; |
@@ -1266,8 +1252,6 @@ static int ehea_clean_portres(struct ehea_port *port, struct ehea_port_res *pr) | |||
1266 | { | 1252 | { |
1267 | int ret, i; | 1253 | int ret, i; |
1268 | 1254 | ||
1269 | free_netdev(pr->d_netdev); | ||
1270 | |||
1271 | ret = ehea_destroy_qp(pr->qp); | 1255 | ret = ehea_destroy_qp(pr->qp); |
1272 | 1256 | ||
1273 | if (!ret) { | 1257 | if (!ret) { |
@@ -2248,6 +2232,22 @@ out: | |||
2248 | return ret; | 2232 | return ret; |
2249 | } | 2233 | } |
2250 | 2234 | ||
2235 | static void port_napi_disable(struct ehea_port *port) | ||
2236 | { | ||
2237 | int i; | ||
2238 | |||
2239 | for (i = 0; i < port->num_def_qps; i++) | ||
2240 | napi_disable(&port->port_res[i].napi); | ||
2241 | } | ||
2242 | |||
2243 | static void port_napi_enable(struct ehea_port *port) | ||
2244 | { | ||
2245 | int i; | ||
2246 | |||
2247 | for (i = 0; i < port->num_def_qps; i++) | ||
2248 | napi_enable(&port->port_res[i].napi); | ||
2249 | } | ||
2250 | |||
2251 | static int ehea_open(struct net_device *dev) | 2251 | static int ehea_open(struct net_device *dev) |
2252 | { | 2252 | { |
2253 | int ret; | 2253 | int ret; |
@@ -2259,8 +2259,10 @@ static int ehea_open(struct net_device *dev) | |||
2259 | ehea_info("enabling port %s", dev->name); | 2259 | ehea_info("enabling port %s", dev->name); |
2260 | 2260 | ||
2261 | ret = ehea_up(dev); | 2261 | ret = ehea_up(dev); |
2262 | if (!ret) | 2262 | if (!ret) { |
2263 | port_napi_enable(port); | ||
2263 | netif_start_queue(dev); | 2264 | netif_start_queue(dev); |
2265 | } | ||
2264 | 2266 | ||
2265 | up(&port->port_lock); | 2267 | up(&port->port_lock); |
2266 | 2268 | ||
@@ -2269,7 +2271,7 @@ static int ehea_open(struct net_device *dev) | |||
2269 | 2271 | ||
2270 | static int ehea_down(struct net_device *dev) | 2272 | static int ehea_down(struct net_device *dev) |
2271 | { | 2273 | { |
2272 | int ret, i; | 2274 | int ret; |
2273 | struct ehea_port *port = netdev_priv(dev); | 2275 | struct ehea_port *port = netdev_priv(dev); |
2274 | 2276 | ||
2275 | if (port->state == EHEA_PORT_DOWN) | 2277 | if (port->state == EHEA_PORT_DOWN) |
@@ -2278,10 +2280,7 @@ static int ehea_down(struct net_device *dev) | |||
2278 | ehea_drop_multicast_list(dev); | 2280 | ehea_drop_multicast_list(dev); |
2279 | ehea_free_interrupts(dev); | 2281 | ehea_free_interrupts(dev); |
2280 | 2282 | ||
2281 | for (i = 0; i < port->num_def_qps; i++) | 2283 | port_napi_disable(port); |
2282 | while (test_bit(__LINK_STATE_RX_SCHED, | ||
2283 | &port->port_res[i].d_netdev->state)) | ||
2284 | msleep(1); | ||
2285 | 2284 | ||
2286 | port->state = EHEA_PORT_DOWN; | 2285 | port->state = EHEA_PORT_DOWN; |
2287 | 2286 | ||
@@ -2319,7 +2318,8 @@ static void ehea_reset_port(struct work_struct *work) | |||
2319 | port->resets++; | 2318 | port->resets++; |
2320 | down(&port->port_lock); | 2319 | down(&port->port_lock); |
2321 | netif_stop_queue(dev); | 2320 | netif_stop_queue(dev); |
2322 | netif_poll_disable(dev); | 2321 | |
2322 | port_napi_disable(port); | ||
2323 | 2323 | ||
2324 | ehea_down(dev); | 2324 | ehea_down(dev); |
2325 | 2325 | ||
@@ -2330,7 +2330,8 @@ static void ehea_reset_port(struct work_struct *work) | |||
2330 | if (netif_msg_timer(port)) | 2330 | if (netif_msg_timer(port)) |
2331 | ehea_info("Device %s reset successfully", dev->name); | 2331 | ehea_info("Device %s reset successfully", dev->name); |
2332 | 2332 | ||
2333 | netif_poll_enable(dev); | 2333 | port_napi_enable(port); |
2334 | |||
2334 | netif_wake_queue(dev); | 2335 | netif_wake_queue(dev); |
2335 | out: | 2336 | out: |
2336 | up(&port->port_lock); | 2337 | up(&port->port_lock); |
@@ -2358,7 +2359,9 @@ static void ehea_rereg_mrs(struct work_struct *work) | |||
2358 | dev->name); | 2359 | dev->name); |
2359 | down(&port->port_lock); | 2360 | down(&port->port_lock); |
2360 | netif_stop_queue(dev); | 2361 | netif_stop_queue(dev); |
2361 | netif_poll_disable(dev); | 2362 | |
2363 | port_napi_disable(port); | ||
2364 | |||
2362 | ehea_down(dev); | 2365 | ehea_down(dev); |
2363 | up(&port->port_lock); | 2366 | up(&port->port_lock); |
2364 | } | 2367 | } |
@@ -2406,7 +2409,7 @@ static void ehea_rereg_mrs(struct work_struct *work) | |||
2406 | 2409 | ||
2407 | ret = ehea_up(dev); | 2410 | ret = ehea_up(dev); |
2408 | if (!ret) { | 2411 | if (!ret) { |
2409 | netif_poll_enable(dev); | 2412 | port_napi_enable(port); |
2410 | netif_wake_queue(dev); | 2413 | netif_wake_queue(dev); |
2411 | } | 2414 | } |
2412 | 2415 | ||
@@ -2644,11 +2647,9 @@ struct ehea_port *ehea_setup_single_port(struct ehea_adapter *adapter, | |||
2644 | memcpy(dev->dev_addr, &port->mac_addr, ETH_ALEN); | 2647 | memcpy(dev->dev_addr, &port->mac_addr, ETH_ALEN); |
2645 | 2648 | ||
2646 | dev->open = ehea_open; | 2649 | dev->open = ehea_open; |
2647 | dev->poll = ehea_poll_firstqueue; | ||
2648 | #ifdef CONFIG_NET_POLL_CONTROLLER | 2650 | #ifdef CONFIG_NET_POLL_CONTROLLER |
2649 | dev->poll_controller = ehea_netpoll; | 2651 | dev->poll_controller = ehea_netpoll; |
2650 | #endif | 2652 | #endif |
2651 | dev->weight = 64; | ||
2652 | dev->stop = ehea_stop; | 2653 | dev->stop = ehea_stop; |
2653 | dev->hard_start_xmit = ehea_start_xmit; | 2654 | dev->hard_start_xmit = ehea_start_xmit; |
2654 | dev->get_stats = ehea_get_stats; | 2655 | dev->get_stats = ehea_get_stats; |
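The ehea teardown hunk is the clearest illustration of what napi_disable() buys: the open-coded msleep() loop on __LINK_STATE_RX_SCHED in ehea_down() is replaced by per-queue napi_disable() calls that perform the same wait internally. Roughly, as the helpers existed when this patch went in:

    static inline void napi_disable(struct napi_struct *n)
    {
        /* claim the SCHED bit, waiting out any poll that holds it */
        while (test_and_set_bit(NAPI_STATE_SCHED, &n->state))
            msleep(1);
    }

    static inline void napi_enable(struct napi_struct *n)
    {
        BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state));
        smp_mb__before_clear_bit();
        clear_bit(NAPI_STATE_SCHED, &n->state);
    }

Since netif_napi_add() leaves a new instance in this disabled state (it sets NAPI_STATE_SCHED at registration time), every bring-up path needs an explicit enable, which is why ehea_open(), ehea_reset_port() and the MR re-registration path all gain port_napi_enable() calls.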
diff --git a/drivers/net/epic100.c b/drivers/net/epic100.c index 119778401e48..f8446e373bdd 100644 --- a/drivers/net/epic100.c +++ b/drivers/net/epic100.c | |||
@@ -262,6 +262,7 @@ struct epic_private { | |||
262 | /* Ring pointers. */ | 262 | /* Ring pointers. */ |
263 | spinlock_t lock; /* Group with Tx control cache line. */ | 263 | spinlock_t lock; /* Group with Tx control cache line. */ |
264 | spinlock_t napi_lock; | 264 | spinlock_t napi_lock; |
265 | struct napi_struct napi; | ||
265 | unsigned int reschedule_in_poll; | 266 | unsigned int reschedule_in_poll; |
266 | unsigned int cur_tx, dirty_tx; | 267 | unsigned int cur_tx, dirty_tx; |
267 | 268 | ||
@@ -294,7 +295,7 @@ static void epic_tx_timeout(struct net_device *dev); | |||
294 | static void epic_init_ring(struct net_device *dev); | 295 | static void epic_init_ring(struct net_device *dev); |
295 | static int epic_start_xmit(struct sk_buff *skb, struct net_device *dev); | 296 | static int epic_start_xmit(struct sk_buff *skb, struct net_device *dev); |
296 | static int epic_rx(struct net_device *dev, int budget); | 297 | static int epic_rx(struct net_device *dev, int budget); |
297 | static int epic_poll(struct net_device *dev, int *budget); | 298 | static int epic_poll(struct napi_struct *napi, int budget); |
298 | static irqreturn_t epic_interrupt(int irq, void *dev_instance); | 299 | static irqreturn_t epic_interrupt(int irq, void *dev_instance); |
299 | static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); | 300 | static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); |
300 | static const struct ethtool_ops netdev_ethtool_ops; | 301 | static const struct ethtool_ops netdev_ethtool_ops; |
@@ -487,8 +488,7 @@ static int __devinit epic_init_one (struct pci_dev *pdev, | |||
487 | dev->ethtool_ops = &netdev_ethtool_ops; | 488 | dev->ethtool_ops = &netdev_ethtool_ops; |
488 | dev->watchdog_timeo = TX_TIMEOUT; | 489 | dev->watchdog_timeo = TX_TIMEOUT; |
489 | dev->tx_timeout = &epic_tx_timeout; | 490 | dev->tx_timeout = &epic_tx_timeout; |
490 | dev->poll = epic_poll; | 491 | netif_napi_add(dev, &ep->napi, epic_poll, 64); |
491 | dev->weight = 64; | ||
492 | 492 | ||
493 | ret = register_netdev(dev); | 493 | ret = register_netdev(dev); |
494 | if (ret < 0) | 494 | if (ret < 0) |
@@ -660,8 +660,11 @@ static int epic_open(struct net_device *dev) | |||
660 | /* Soft reset the chip. */ | 660 | /* Soft reset the chip. */ |
661 | outl(0x4001, ioaddr + GENCTL); | 661 | outl(0x4001, ioaddr + GENCTL); |
662 | 662 | ||
663 | if ((retval = request_irq(dev->irq, &epic_interrupt, IRQF_SHARED, dev->name, dev))) | 663 | napi_enable(&ep->napi); |
664 | if ((retval = request_irq(dev->irq, &epic_interrupt, IRQF_SHARED, dev->name, dev))) { | ||
665 | napi_disable(&ep->napi); | ||
664 | return retval; | 666 | return retval; |
667 | } | ||
665 | 668 | ||
666 | epic_init_ring(dev); | 669 | epic_init_ring(dev); |
667 | 670 | ||
@@ -1103,9 +1106,9 @@ static irqreturn_t epic_interrupt(int irq, void *dev_instance) | |||
1103 | 1106 | ||
1104 | if ((status & EpicNapiEvent) && !ep->reschedule_in_poll) { | 1107 | if ((status & EpicNapiEvent) && !ep->reschedule_in_poll) { |
1105 | spin_lock(&ep->napi_lock); | 1108 | spin_lock(&ep->napi_lock); |
1106 | if (netif_rx_schedule_prep(dev)) { | 1109 | if (netif_rx_schedule_prep(dev, &ep->napi)) { |
1107 | epic_napi_irq_off(dev, ep); | 1110 | epic_napi_irq_off(dev, ep); |
1108 | __netif_rx_schedule(dev); | 1111 | __netif_rx_schedule(dev, &ep->napi); |
1109 | } else | 1112 | } else |
1110 | ep->reschedule_in_poll++; | 1113 | ep->reschedule_in_poll++; |
1111 | spin_unlock(&ep->napi_lock); | 1114 | spin_unlock(&ep->napi_lock); |
@@ -1257,26 +1260,22 @@ static void epic_rx_err(struct net_device *dev, struct epic_private *ep) | |||
1257 | outw(RxQueued, ioaddr + COMMAND); | 1260 | outw(RxQueued, ioaddr + COMMAND); |
1258 | } | 1261 | } |
1259 | 1262 | ||
1260 | static int epic_poll(struct net_device *dev, int *budget) | 1263 | static int epic_poll(struct napi_struct *napi, int budget) |
1261 | { | 1264 | { |
1262 | struct epic_private *ep = dev->priv; | 1265 | struct epic_private *ep = container_of(napi, struct epic_private, napi); |
1263 | int work_done = 0, orig_budget; | 1266 | struct net_device *dev = ep->mii.dev; |
1267 | int work_done = 0; | ||
1264 | long ioaddr = dev->base_addr; | 1268 | long ioaddr = dev->base_addr; |
1265 | 1269 | ||
1266 | orig_budget = (*budget > dev->quota) ? dev->quota : *budget; | ||
1267 | |||
1268 | rx_action: | 1270 | rx_action: |
1269 | 1271 | ||
1270 | epic_tx(dev, ep); | 1272 | epic_tx(dev, ep); |
1271 | 1273 | ||
1272 | work_done += epic_rx(dev, *budget); | 1274 | work_done += epic_rx(dev, budget); |
1273 | 1275 | ||
1274 | epic_rx_err(dev, ep); | 1276 | epic_rx_err(dev, ep); |
1275 | 1277 | ||
1276 | *budget -= work_done; | 1278 | if (netif_running(dev) && (work_done < budget)) { |
1277 | dev->quota -= work_done; | ||
1278 | |||
1279 | if (netif_running(dev) && (work_done < orig_budget)) { | ||
1280 | unsigned long flags; | 1279 | unsigned long flags; |
1281 | int more; | 1280 | int more; |
1282 | 1281 | ||
@@ -1286,7 +1285,7 @@ rx_action: | |||
1286 | 1285 | ||
1287 | more = ep->reschedule_in_poll; | 1286 | more = ep->reschedule_in_poll; |
1288 | if (!more) { | 1287 | if (!more) { |
1289 | __netif_rx_complete(dev); | 1288 | __netif_rx_complete(dev, napi); |
1290 | outl(EpicNapiEvent, ioaddr + INTSTAT); | 1289 | outl(EpicNapiEvent, ioaddr + INTSTAT); |
1291 | epic_napi_irq_on(dev, ep); | 1290 | epic_napi_irq_on(dev, ep); |
1292 | } else | 1291 | } else |
@@ -1298,7 +1297,7 @@ rx_action: | |||
1298 | goto rx_action; | 1297 | goto rx_action; |
1299 | } | 1298 | } |
1300 | 1299 | ||
1301 | return (work_done >= orig_budget); | 1300 | return work_done; |
1302 | } | 1301 | } |
1303 | 1302 | ||
1304 | static int epic_close(struct net_device *dev) | 1303 | static int epic_close(struct net_device *dev) |
@@ -1309,6 +1308,7 @@ static int epic_close(struct net_device *dev) | |||
1309 | int i; | 1308 | int i; |
1310 | 1309 | ||
1311 | netif_stop_queue(dev); | 1310 | netif_stop_queue(dev); |
1311 | napi_disable(&ep->napi); | ||
1312 | 1312 | ||
1313 | if (debug > 1) | 1313 | if (debug > 1) |
1314 | printk(KERN_DEBUG "%s: Shutting down ethercard, status was %2.2x.\n", | 1314 | printk(KERN_DEBUG "%s: Shutting down ethercard, status was %2.2x.\n", |
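The epic100 hunks above show the shape of the whole conversion: the old poll took (struct net_device *, int *budget), had to reconcile *budget with dev->quota, and returned a done/not-done flag; the new poll takes a napi_struct plus a plain budget and returns the amount of work performed. A minimal sketch of the new-style handler, using hypothetical foo_* names and assumed driver helpers rather than code from the patch:

struct foo_private {
	struct net_device *dev;		/* back-pointer, since poll() no longer gets the device */
	struct napi_struct napi;
};

static int foo_poll(struct napi_struct *napi, int budget)
{
	struct foo_private *fp = container_of(napi, struct foo_private, napi);
	struct net_device *dev = fp->dev;
	int work_done;

	/* handle at most "budget" packets; no more *budget/dev->quota math */
	work_done = foo_clean_rx_ring(dev, budget);	/* assumed RX helper */

	if (work_done < budget) {
		/* ring drained: leave polled mode and unmask RX interrupts */
		netif_rx_complete(dev, napi);
		foo_enable_rx_irq(fp);			/* assumed IRQ helper */
	}
	return work_done;	/* net_rx_action() charges this to the global budget */
}

Returning work_done, and completing only when it is under budget, is what lets the core account fairly across devices without every driver poking at shared quota fields.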
diff --git a/drivers/net/fec_8xx/fec_8xx.h b/drivers/net/fec_8xx/fec_8xx.h index 5af60b0f9208..f3b1c6fbba8b 100644 --- a/drivers/net/fec_8xx/fec_8xx.h +++ b/drivers/net/fec_8xx/fec_8xx.h | |||
@@ -105,6 +105,8 @@ struct fec; | |||
105 | struct fec_enet_private { | 105 | struct fec_enet_private { |
106 | spinlock_t lock; /* during all ops except TX pckt processing */ | 106 | spinlock_t lock; /* during all ops except TX pckt processing */ |
107 | spinlock_t tx_lock; /* during fec_start_xmit and fec_tx */ | 107 | spinlock_t tx_lock; /* during fec_start_xmit and fec_tx */ |
108 | struct net_device *dev; | ||
109 | struct napi_struct napi; | ||
108 | int fecno; | 110 | int fecno; |
109 | struct fec *fecp; | 111 | struct fec *fecp; |
110 | const struct fec_platform_info *fpi; | 112 | const struct fec_platform_info *fpi; |
diff --git a/drivers/net/fec_8xx/fec_main.c b/drivers/net/fec_8xx/fec_main.c index e5502af5b8e2..6348fb93ca9c 100644 --- a/drivers/net/fec_8xx/fec_main.c +++ b/drivers/net/fec_8xx/fec_main.c | |||
@@ -465,9 +465,9 @@ void fec_stop(struct net_device *dev) | |||
465 | } | 465 | } |
466 | 466 | ||
467 | /* common receive function */ | 467 | /* common receive function */ |
468 | static int fec_enet_rx_common(struct net_device *dev, int *budget) | 468 | static int fec_enet_rx_common(struct fec_enet_private *fep, |
469 | struct net_device *dev, int budget) | ||
469 | { | 470 | { |
470 | struct fec_enet_private *fep = netdev_priv(dev); | ||
471 | fec_t *fecp = fep->fecp; | 471 | fec_t *fecp = fep->fecp; |
472 | const struct fec_platform_info *fpi = fep->fpi; | 472 | const struct fec_platform_info *fpi = fep->fpi; |
473 | cbd_t *bdp; | 473 | cbd_t *bdp; |
@@ -475,11 +475,8 @@ static int fec_enet_rx_common(struct net_device *dev, int *budget) | |||
475 | int received = 0; | 475 | int received = 0; |
476 | __u16 pkt_len, sc; | 476 | __u16 pkt_len, sc; |
477 | int curidx; | 477 | int curidx; |
478 | int rx_work_limit; | ||
479 | 478 | ||
480 | if (fpi->use_napi) { | 479 | if (fpi->use_napi) { |
481 | rx_work_limit = min(dev->quota, *budget); | ||
482 | |||
483 | if (!netif_running(dev)) | 480 | if (!netif_running(dev)) |
484 | return 0; | 481 | return 0; |
485 | } | 482 | } |
@@ -530,11 +527,6 @@ static int fec_enet_rx_common(struct net_device *dev, int *budget) | |||
530 | BUG_ON(skbn == NULL); | 527 | BUG_ON(skbn == NULL); |
531 | 528 | ||
532 | } else { | 529 | } else { |
533 | |||
534 | /* napi, got packet but no quota */ | ||
535 | if (fpi->use_napi && --rx_work_limit < 0) | ||
536 | break; | ||
537 | |||
538 | skb = fep->rx_skbuff[curidx]; | 530 | skb = fep->rx_skbuff[curidx]; |
539 | BUG_ON(skb == NULL); | 531 | BUG_ON(skb == NULL); |
540 | 532 | ||
@@ -599,25 +591,24 @@ static int fec_enet_rx_common(struct net_device *dev, int *budget) | |||
599 | * able to keep up at the expense of system resources. | 591 | * able to keep up at the expense of system resources. |
600 | */ | 592 | */ |
601 | FW(fecp, r_des_active, 0x01000000); | 593 | FW(fecp, r_des_active, 0x01000000); |
594 | |||
595 | if (received >= budget) | ||
596 | break; | ||
597 | |||
602 | } | 598 | } |
603 | 599 | ||
604 | fep->cur_rx = bdp; | 600 | fep->cur_rx = bdp; |
605 | 601 | ||
606 | if (fpi->use_napi) { | 602 | if (fpi->use_napi) { |
607 | dev->quota -= received; | 603 | if (received < budget) { |
608 | *budget -= received; | 604 | netif_rx_complete(dev, &fep->napi); |
609 | |||
610 | if (rx_work_limit < 0) | ||
611 | return 1; /* not done */ | ||
612 | 605 | ||
613 | /* done */ | 606 | /* enable RX interrupt bits */ |
614 | netif_rx_complete(dev); | 607 | FS(fecp, imask, FEC_ENET_RXF | FEC_ENET_RXB); |
615 | 608 | } | |
616 | /* enable RX interrupt bits */ | ||
617 | FS(fecp, imask, FEC_ENET_RXF | FEC_ENET_RXB); | ||
618 | } | 609 | } |
619 | 610 | ||
620 | return 0; | 611 | return received; |
621 | } | 612 | } |
622 | 613 | ||
623 | static void fec_enet_tx(struct net_device *dev) | 614 | static void fec_enet_tx(struct net_device *dev) |
@@ -743,12 +734,12 @@ fec_enet_interrupt(int irq, void *dev_id) | |||
743 | 734 | ||
744 | if ((int_events & FEC_ENET_RXF) != 0) { | 735 | if ((int_events & FEC_ENET_RXF) != 0) { |
745 | if (!fpi->use_napi) | 736 | if (!fpi->use_napi) |
746 | fec_enet_rx_common(dev, NULL); | 737 | fec_enet_rx_common(fep, dev, INT_MAX); |
747 | else { | 738 | else { |
748 | if (netif_rx_schedule_prep(dev)) { | 739 | if (netif_rx_schedule_prep(dev, &fep->napi)) { |
749 | /* disable rx interrupts */ | 740 | /* disable rx interrupts */ |
750 | FC(fecp, imask, FEC_ENET_RXF | FEC_ENET_RXB); | 741 | FC(fecp, imask, FEC_ENET_RXF | FEC_ENET_RXB); |
751 | __netif_rx_schedule(dev); | 742 | __netif_rx_schedule(dev, &fep->napi); |
752 | } else { | 743 | } else { |
753 | printk(KERN_ERR DRV_MODULE_NAME | 744 | printk(KERN_ERR DRV_MODULE_NAME |
754 | ": %s driver bug! interrupt while in poll!\n", | 745 | ": %s driver bug! interrupt while in poll!\n", |
@@ -893,10 +884,13 @@ static int fec_enet_open(struct net_device *dev) | |||
893 | const struct fec_platform_info *fpi = fep->fpi; | 884 | const struct fec_platform_info *fpi = fep->fpi; |
894 | unsigned long flags; | 885 | unsigned long flags; |
895 | 886 | ||
887 | napi_enable(&fep->napi); | ||
888 | |||
896 | /* Install our interrupt handler. */ | 889 | /* Install our interrupt handler. */ |
897 | if (request_irq(fpi->fec_irq, fec_enet_interrupt, 0, "fec", dev) != 0) { | 890 | if (request_irq(fpi->fec_irq, fec_enet_interrupt, 0, "fec", dev) != 0) { |
898 | printk(KERN_ERR DRV_MODULE_NAME | 891 | printk(KERN_ERR DRV_MODULE_NAME |
899 | ": %s Could not allocate FEC IRQ!", dev->name); | 892 | ": %s Could not allocate FEC IRQ!", dev->name); |
893 | napi_disable(&fep->napi); | ||
900 | return -EINVAL; | 894 | return -EINVAL; |
901 | } | 895 | } |
902 | 896 | ||
@@ -907,6 +901,7 @@ static int fec_enet_open(struct net_device *dev) | |||
907 | printk(KERN_ERR DRV_MODULE_NAME | 901 | printk(KERN_ERR DRV_MODULE_NAME |
908 | ": %s Could not allocate PHY IRQ!", dev->name); | 902 | ": %s Could not allocate PHY IRQ!", dev->name); |
909 | free_irq(fpi->fec_irq, dev); | 903 | free_irq(fpi->fec_irq, dev); |
904 | napi_disable(&fep->napi); | ||
910 | return -EINVAL; | 905 | return -EINVAL; |
911 | } | 906 | } |
912 | 907 | ||
@@ -932,6 +927,7 @@ static int fec_enet_close(struct net_device *dev) | |||
932 | unsigned long flags; | 927 | unsigned long flags; |
933 | 928 | ||
934 | netif_stop_queue(dev); | 929 | netif_stop_queue(dev); |
930 | napi_disable(&fep->napi); | ||
935 | netif_carrier_off(dev); | 931 | netif_carrier_off(dev); |
936 | 932 | ||
937 | if (fpi->use_mdio) | 933 | if (fpi->use_mdio) |
@@ -955,9 +951,12 @@ static struct net_device_stats *fec_enet_get_stats(struct net_device *dev) | |||
955 | return &fep->stats; | 951 | return &fep->stats; |
956 | } | 952 | } |
957 | 953 | ||
958 | static int fec_enet_poll(struct net_device *dev, int *budget) | 954 | static int fec_enet_poll(struct napi_struct *napi, int budget) |
959 | { | 955 | { |
960 | return fec_enet_rx_common(dev, budget); | 956 | struct fec_enet_private *fep = container_of(napi, struct fec_enet_private, napi); |
957 | struct net_device *dev = fep->dev; | ||
958 | |||
959 | return fec_enet_rx_common(fep, dev, budget); | ||
961 | } | 960 | } |
962 | 961 | ||
963 | /*************************************************************************/ | 962 | /*************************************************************************/ |
@@ -1107,6 +1106,7 @@ int fec_8xx_init_one(const struct fec_platform_info *fpi, | |||
1107 | SET_MODULE_OWNER(dev); | 1106 | SET_MODULE_OWNER(dev); |
1108 | 1107 | ||
1109 | fep = netdev_priv(dev); | 1108 | fep = netdev_priv(dev); |
1109 | fep->dev = dev; | ||
1110 | 1110 | ||
1111 | /* partial reset of FEC */ | 1111 | /* partial reset of FEC */ |
1112 | fec_whack_reset(fecp); | 1112 | fec_whack_reset(fecp); |
@@ -1172,10 +1172,9 @@ int fec_8xx_init_one(const struct fec_platform_info *fpi, | |||
1172 | dev->get_stats = fec_enet_get_stats; | 1172 | dev->get_stats = fec_enet_get_stats; |
1173 | dev->set_multicast_list = fec_set_multicast_list; | 1173 | dev->set_multicast_list = fec_set_multicast_list; |
1174 | dev->set_mac_address = fec_set_mac_address; | 1174 | dev->set_mac_address = fec_set_mac_address; |
1175 | if (fpi->use_napi) { | 1175 | netif_napi_add(dev, &fep->napi, |
1176 | dev->poll = fec_enet_poll; | 1176 | fec_enet_poll, fpi->napi_weight); |
1177 | dev->weight = fpi->napi_weight; | 1177 | |
1178 | } | ||
1179 | dev->ethtool_ops = &fec_ethtool_ops; | 1178 | dev->ethtool_ops = &fec_ethtool_ops; |
1180 | dev->do_ioctl = fec_ioctl; | 1179 | dev->do_ioctl = fec_ioctl; |
1181 | 1180 | ||
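Both fec_8xx hunks follow from one constraint: poll() now receives only the napi_struct, so the napi_struct must live inside the driver's private area and the private area needs a back-pointer to the net_device (hence the new fep->dev = dev in fec_8xx_init_one). A registration-side sketch with the same hypothetical foo_* names:

static int foo_init_one(void)
{
	struct net_device *dev;
	struct foo_private *fp;
	int err;

	dev = alloc_etherdev(sizeof(struct foo_private));
	if (!dev)
		return -ENOMEM;

	fp = netdev_priv(dev);
	fp->dev = dev;		/* poll() recovers dev through this after container_of() */

	/* replaces the old "dev->poll = foo_poll; dev->weight = 64;" pair */
	netif_napi_add(dev, &fp->napi, foo_poll, 64);

	err = register_netdev(dev);
	if (err)
		free_netdev(dev);
	return err;
}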
diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c index 1938d6dfc863..24c1294614f2 100644 --- a/drivers/net/forcedeth.c +++ b/drivers/net/forcedeth.c | |||
@@ -159,6 +159,8 @@ | |||
159 | #define dprintk(x...) do { } while (0) | 159 | #define dprintk(x...) do { } while (0) |
160 | #endif | 160 | #endif |
161 | 161 | ||
162 | #define TX_WORK_PER_LOOP 64 | ||
163 | #define RX_WORK_PER_LOOP 64 | ||
162 | 164 | ||
163 | /* | 165 | /* |
164 | * Hardware access: | 166 | * Hardware access: |
@@ -745,6 +747,9 @@ struct nv_skb_map { | |||
745 | struct fe_priv { | 747 | struct fe_priv { |
746 | spinlock_t lock; | 748 | spinlock_t lock; |
747 | 749 | ||
750 | struct net_device *dev; | ||
751 | struct napi_struct napi; | ||
752 | |||
748 | /* General data: | 753 | /* General data: |
749 | * Locking: spin_lock(&np->lock); */ | 754 | * Locking: spin_lock(&np->lock); */ |
750 | struct net_device_stats stats; | 755 | struct net_device_stats stats; |
@@ -1586,9 +1591,10 @@ static int nv_alloc_rx_optimized(struct net_device *dev) | |||
1586 | static void nv_do_rx_refill(unsigned long data) | 1591 | static void nv_do_rx_refill(unsigned long data) |
1587 | { | 1592 | { |
1588 | struct net_device *dev = (struct net_device *) data; | 1593 | struct net_device *dev = (struct net_device *) data; |
1594 | struct fe_priv *np = netdev_priv(dev); | ||
1589 | 1595 | ||
1590 | /* Just reschedule NAPI rx processing */ | 1596 | /* Just reschedule NAPI rx processing */ |
1591 | netif_rx_schedule(dev); | 1597 | netif_rx_schedule(dev, &np->napi); |
1592 | } | 1598 | } |
1593 | #else | 1599 | #else |
1594 | static void nv_do_rx_refill(unsigned long data) | 1600 | static void nv_do_rx_refill(unsigned long data) |
@@ -2997,7 +3003,7 @@ static irqreturn_t nv_nic_irq(int foo, void *data) | |||
2997 | 3003 | ||
2998 | #ifdef CONFIG_FORCEDETH_NAPI | 3004 | #ifdef CONFIG_FORCEDETH_NAPI |
2999 | if (events & NVREG_IRQ_RX_ALL) { | 3005 | if (events & NVREG_IRQ_RX_ALL) { |
3000 | netif_rx_schedule(dev); | 3006 | netif_rx_schedule(dev, &np->napi); |
3001 | 3007 | ||
3002 | /* Disable further receive irq's */ | 3008 | /* Disable further receive irq's */ |
3003 | spin_lock(&np->lock); | 3009 | spin_lock(&np->lock); |
@@ -3010,7 +3016,7 @@ static irqreturn_t nv_nic_irq(int foo, void *data) | |||
3010 | spin_unlock(&np->lock); | 3016 | spin_unlock(&np->lock); |
3011 | } | 3017 | } |
3012 | #else | 3018 | #else |
3013 | if (nv_rx_process(dev, dev->weight)) { | 3019 | if (nv_rx_process(dev, RX_WORK_PER_LOOP)) { |
3014 | if (unlikely(nv_alloc_rx(dev))) { | 3020 | if (unlikely(nv_alloc_rx(dev))) { |
3015 | spin_lock(&np->lock); | 3021 | spin_lock(&np->lock); |
3016 | if (!np->in_shutdown) | 3022 | if (!np->in_shutdown) |
@@ -3079,8 +3085,6 @@ static irqreturn_t nv_nic_irq(int foo, void *data) | |||
3079 | return IRQ_RETVAL(i); | 3085 | return IRQ_RETVAL(i); |
3080 | } | 3086 | } |
3081 | 3087 | ||
3082 | #define TX_WORK_PER_LOOP 64 | ||
3083 | #define RX_WORK_PER_LOOP 64 | ||
3084 | /** | 3088 | /** |
3085 | * All _optimized functions are used to help increase performance | 3089 | * All _optimized functions are used to help increase performance |
3086 | * (reduce CPU and increase throughput). They use descriptor version 3, | 3090 |
@@ -3114,7 +3118,7 @@ static irqreturn_t nv_nic_irq_optimized(int foo, void *data) | |||
3114 | 3118 | ||
3115 | #ifdef CONFIG_FORCEDETH_NAPI | 3119 | #ifdef CONFIG_FORCEDETH_NAPI |
3116 | if (events & NVREG_IRQ_RX_ALL) { | 3120 | if (events & NVREG_IRQ_RX_ALL) { |
3117 | netif_rx_schedule(dev); | 3121 | netif_rx_schedule(dev, &np->napi); |
3118 | 3122 | ||
3119 | /* Disable further receive irq's */ | 3123 | /* Disable further receive irq's */ |
3120 | spin_lock(&np->lock); | 3124 | spin_lock(&np->lock); |
@@ -3127,7 +3131,7 @@ static irqreturn_t nv_nic_irq_optimized(int foo, void *data) | |||
3127 | spin_unlock(&np->lock); | 3131 | spin_unlock(&np->lock); |
3128 | } | 3132 | } |
3129 | #else | 3133 | #else |
3130 | if (nv_rx_process_optimized(dev, dev->weight)) { | 3134 | if (nv_rx_process_optimized(dev, RX_WORK_PER_LOOP)) { |
3131 | if (unlikely(nv_alloc_rx_optimized(dev))) { | 3135 | if (unlikely(nv_alloc_rx_optimized(dev))) { |
3132 | spin_lock(&np->lock); | 3136 | spin_lock(&np->lock); |
3133 | if (!np->in_shutdown) | 3137 | if (!np->in_shutdown) |
@@ -3245,19 +3249,19 @@ static irqreturn_t nv_nic_irq_tx(int foo, void *data) | |||
3245 | } | 3249 | } |
3246 | 3250 | ||
3247 | #ifdef CONFIG_FORCEDETH_NAPI | 3251 | #ifdef CONFIG_FORCEDETH_NAPI |
3248 | static int nv_napi_poll(struct net_device *dev, int *budget) | 3252 | static int nv_napi_poll(struct napi_struct *napi, int budget) |
3249 | { | 3253 | { |
3250 | int pkts, limit = min(*budget, dev->quota); | 3254 | struct fe_priv *np = container_of(napi, struct fe_priv, napi); |
3251 | struct fe_priv *np = netdev_priv(dev); | 3255 | struct net_device *dev = np->dev; |
3252 | u8 __iomem *base = get_hwbase(dev); | 3256 | u8 __iomem *base = get_hwbase(dev); |
3253 | unsigned long flags; | 3257 | unsigned long flags; |
3254 | int retcode; | 3258 | int pkts, retcode; |
3255 | 3259 | ||
3256 | if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) { | 3260 | if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) { |
3257 | pkts = nv_rx_process(dev, limit); | 3261 | pkts = nv_rx_process(dev, budget); |
3258 | retcode = nv_alloc_rx(dev); | 3262 | retcode = nv_alloc_rx(dev); |
3259 | } else { | 3263 | } else { |
3260 | pkts = nv_rx_process_optimized(dev, limit); | 3264 | pkts = nv_rx_process_optimized(dev, budget); |
3261 | retcode = nv_alloc_rx_optimized(dev); | 3265 | retcode = nv_alloc_rx_optimized(dev); |
3262 | } | 3266 | } |
3263 | 3267 | ||
@@ -3268,13 +3272,12 @@ static int nv_napi_poll(struct net_device *dev, int *budget) | |||
3268 | spin_unlock_irqrestore(&np->lock, flags); | 3272 | spin_unlock_irqrestore(&np->lock, flags); |
3269 | } | 3273 | } |
3270 | 3274 | ||
3271 | if (pkts < limit) { | 3275 | if (pkts < budget) { |
3272 | /* all done, no more packets present */ | ||
3273 | netif_rx_complete(dev); | ||
3274 | |||
3275 | /* re-enable receive interrupts */ | 3276 | /* re-enable receive interrupts */ |
3276 | spin_lock_irqsave(&np->lock, flags); | 3277 | spin_lock_irqsave(&np->lock, flags); |
3277 | 3278 | ||
3279 | __netif_rx_complete(dev, napi); | ||
3280 | |||
3278 | np->irqmask |= NVREG_IRQ_RX_ALL; | 3281 | np->irqmask |= NVREG_IRQ_RX_ALL; |
3279 | if (np->msi_flags & NV_MSI_X_ENABLED) | 3282 | if (np->msi_flags & NV_MSI_X_ENABLED) |
3280 | writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask); | 3283 | writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask); |
@@ -3282,13 +3285,8 @@ static int nv_napi_poll(struct net_device *dev, int *budget) | |||
3282 | writel(np->irqmask, base + NvRegIrqMask); | 3285 | writel(np->irqmask, base + NvRegIrqMask); |
3283 | 3286 | ||
3284 | spin_unlock_irqrestore(&np->lock, flags); | 3287 | spin_unlock_irqrestore(&np->lock, flags); |
3285 | return 0; | ||
3286 | } else { | ||
3287 | /* used up our quantum, so reschedule */ | ||
3288 | dev->quota -= pkts; | ||
3289 | *budget -= pkts; | ||
3290 | return 1; | ||
3291 | } | 3288 | } |
3289 | return pkts; | ||
3292 | } | 3290 | } |
3293 | #endif | 3291 | #endif |
3294 | 3292 | ||
@@ -3296,6 +3294,7 @@ static int nv_napi_poll(struct net_device *dev, int *budget) | |||
3296 | static irqreturn_t nv_nic_irq_rx(int foo, void *data) | 3294 | static irqreturn_t nv_nic_irq_rx(int foo, void *data) |
3297 | { | 3295 | { |
3298 | struct net_device *dev = (struct net_device *) data; | 3296 | struct net_device *dev = (struct net_device *) data; |
3297 | struct fe_priv *np = netdev_priv(dev); | ||
3299 | u8 __iomem *base = get_hwbase(dev); | 3298 | u8 __iomem *base = get_hwbase(dev); |
3300 | u32 events; | 3299 | u32 events; |
3301 | 3300 | ||
@@ -3303,7 +3302,7 @@ static irqreturn_t nv_nic_irq_rx(int foo, void *data) | |||
3303 | writel(NVREG_IRQ_RX_ALL, base + NvRegMSIXIrqStatus); | 3302 | writel(NVREG_IRQ_RX_ALL, base + NvRegMSIXIrqStatus); |
3304 | 3303 | ||
3305 | if (events) { | 3304 | if (events) { |
3306 | netif_rx_schedule(dev); | 3305 | netif_rx_schedule(dev, &np->napi); |
3307 | /* disable receive interrupts on the nic */ | 3306 | /* disable receive interrupts on the nic */ |
3308 | writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask); | 3307 | writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask); |
3309 | pci_push(base); | 3308 | pci_push(base); |
@@ -3329,7 +3328,7 @@ static irqreturn_t nv_nic_irq_rx(int foo, void *data) | |||
3329 | if (!(events & np->irqmask)) | 3328 | if (!(events & np->irqmask)) |
3330 | break; | 3329 | break; |
3331 | 3330 | ||
3332 | if (nv_rx_process_optimized(dev, dev->weight)) { | 3331 | if (nv_rx_process_optimized(dev, RX_WORK_PER_LOOP)) { |
3333 | if (unlikely(nv_alloc_rx_optimized(dev))) { | 3332 | if (unlikely(nv_alloc_rx_optimized(dev))) { |
3334 | spin_lock_irqsave(&np->lock, flags); | 3333 | spin_lock_irqsave(&np->lock, flags); |
3335 | if (!np->in_shutdown) | 3334 | if (!np->in_shutdown) |
@@ -4620,7 +4619,9 @@ static void nv_self_test(struct net_device *dev, struct ethtool_test *test, u64 | |||
4620 | if (test->flags & ETH_TEST_FL_OFFLINE) { | 4619 | if (test->flags & ETH_TEST_FL_OFFLINE) { |
4621 | if (netif_running(dev)) { | 4620 | if (netif_running(dev)) { |
4622 | netif_stop_queue(dev); | 4621 | netif_stop_queue(dev); |
4623 | netif_poll_disable(dev); | 4622 | #ifdef CONFIG_FORCEDETH_NAPI |
4623 | napi_disable(&np->napi); | ||
4624 | #endif | ||
4624 | netif_tx_lock_bh(dev); | 4625 | netif_tx_lock_bh(dev); |
4625 | spin_lock_irq(&np->lock); | 4626 | spin_lock_irq(&np->lock); |
4626 | nv_disable_hw_interrupts(dev, np->irqmask); | 4627 | nv_disable_hw_interrupts(dev, np->irqmask); |
@@ -4679,7 +4680,9 @@ static void nv_self_test(struct net_device *dev, struct ethtool_test *test, u64 | |||
4679 | nv_start_rx(dev); | 4680 | nv_start_rx(dev); |
4680 | nv_start_tx(dev); | 4681 | nv_start_tx(dev); |
4681 | netif_start_queue(dev); | 4682 | netif_start_queue(dev); |
4682 | netif_poll_enable(dev); | 4683 | #ifdef CONFIG_FORCEDETH_NAPI |
4684 | napi_enable(&np->napi); | ||
4685 | #endif | ||
4683 | nv_enable_hw_interrupts(dev, np->irqmask); | 4686 | nv_enable_hw_interrupts(dev, np->irqmask); |
4684 | } | 4687 | } |
4685 | } | 4688 | } |
@@ -4911,7 +4914,9 @@ static int nv_open(struct net_device *dev) | |||
4911 | nv_start_rx(dev); | 4914 | nv_start_rx(dev); |
4912 | nv_start_tx(dev); | 4915 | nv_start_tx(dev); |
4913 | netif_start_queue(dev); | 4916 | netif_start_queue(dev); |
4914 | netif_poll_enable(dev); | 4917 | #ifdef CONFIG_FORCEDETH_NAPI |
4918 | napi_enable(&np->napi); | ||
4919 | #endif | ||
4915 | 4920 | ||
4916 | if (ret) { | 4921 | if (ret) { |
4917 | netif_carrier_on(dev); | 4922 | netif_carrier_on(dev); |
@@ -4942,7 +4947,9 @@ static int nv_close(struct net_device *dev) | |||
4942 | spin_lock_irq(&np->lock); | 4947 | spin_lock_irq(&np->lock); |
4943 | np->in_shutdown = 1; | 4948 | np->in_shutdown = 1; |
4944 | spin_unlock_irq(&np->lock); | 4949 | spin_unlock_irq(&np->lock); |
4945 | netif_poll_disable(dev); | 4950 | #ifdef CONFIG_FORCEDETH_NAPI |
4951 | napi_disable(&np->napi); | ||
4952 | #endif | ||
4946 | synchronize_irq(dev->irq); | 4953 | synchronize_irq(dev->irq); |
4947 | 4954 | ||
4948 | del_timer_sync(&np->oom_kick); | 4955 | del_timer_sync(&np->oom_kick); |
@@ -4994,6 +5001,7 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i | |||
4994 | goto out; | 5001 | goto out; |
4995 | 5002 | ||
4996 | np = netdev_priv(dev); | 5003 | np = netdev_priv(dev); |
5004 | np->dev = dev; | ||
4997 | np->pci_dev = pci_dev; | 5005 | np->pci_dev = pci_dev; |
4998 | spin_lock_init(&np->lock); | 5006 | spin_lock_init(&np->lock); |
4999 | SET_MODULE_OWNER(dev); | 5007 | SET_MODULE_OWNER(dev); |
@@ -5155,9 +5163,8 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i | |||
5155 | #ifdef CONFIG_NET_POLL_CONTROLLER | 5163 | #ifdef CONFIG_NET_POLL_CONTROLLER |
5156 | dev->poll_controller = nv_poll_controller; | 5164 | dev->poll_controller = nv_poll_controller; |
5157 | #endif | 5165 | #endif |
5158 | dev->weight = RX_WORK_PER_LOOP; | ||
5159 | #ifdef CONFIG_FORCEDETH_NAPI | 5166 | #ifdef CONFIG_FORCEDETH_NAPI |
5160 | dev->poll = nv_napi_poll; | 5167 | netif_napi_add(dev, &np->napi, nv_napi_poll, RX_WORK_PER_LOOP); |
5161 | #endif | 5168 | #endif |
5162 | SET_ETHTOOL_OPS(dev, &ops); | 5169 | SET_ETHTOOL_OPS(dev, &ops); |
5163 | dev->tx_timeout = nv_tx_timeout; | 5170 | dev->tx_timeout = nv_tx_timeout; |
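forcedeth keeps NAPI compile-time optional, so its napi_enable()/napi_disable() calls sit behind CONFIG_FORCEDETH_NAPI while the non-NAPI interrupt path now cleans a fixed RX_WORK_PER_LOOP instead of reading dev->weight. The NAPI half of the interrupt handshake, sketched with the hypothetical helpers from the earlier examples:

static irqreturn_t foo_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct foo_private *fp = netdev_priv(dev);

	if (!foo_rx_event_pending(fp))		/* assumed status-register test */
		return IRQ_NONE;

	if (netif_rx_schedule_prep(dev, &fp->napi)) {
		foo_disable_rx_irq(fp);		/* no RX interrupts while polled */
		__netif_rx_schedule(dev, &fp->napi);
	}
	/* else: a poll is already scheduled and will pick up the new work */
	return IRQ_HANDLED;
}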
diff --git a/drivers/net/fs_enet/fs_enet-main.c b/drivers/net/fs_enet/fs_enet-main.c index a4a2a0ea43d3..c509cb13222d 100644 --- a/drivers/net/fs_enet/fs_enet-main.c +++ b/drivers/net/fs_enet/fs_enet-main.c | |||
@@ -70,18 +70,16 @@ static void fs_set_multicast_list(struct net_device *dev) | |||
70 | } | 70 | } |
71 | 71 | ||
72 | /* NAPI receive function */ | 72 | /* NAPI receive function */ |
73 | static int fs_enet_rx_napi(struct net_device *dev, int *budget) | 73 | static int fs_enet_rx_napi(struct napi_struct *napi, int budget) |
74 | { | 74 | { |
75 | struct fs_enet_private *fep = netdev_priv(dev); | 75 | struct fs_enet_private *fep = container_of(napi, struct fs_enet_private, napi); |
76 | struct net_device *dev = to_net_dev(fep->dev); | ||
76 | const struct fs_platform_info *fpi = fep->fpi; | 77 | const struct fs_platform_info *fpi = fep->fpi; |
77 | cbd_t *bdp; | 78 | cbd_t *bdp; |
78 | struct sk_buff *skb, *skbn, *skbt; | 79 | struct sk_buff *skb, *skbn, *skbt; |
79 | int received = 0; | 80 | int received = 0; |
80 | u16 pkt_len, sc; | 81 | u16 pkt_len, sc; |
81 | int curidx; | 82 | int curidx; |
82 | int rx_work_limit = 0; /* pacify gcc */ | ||
83 | |||
84 | rx_work_limit = min(dev->quota, *budget); | ||
85 | 83 | ||
86 | if (!netif_running(dev)) | 84 | if (!netif_running(dev)) |
87 | return 0; | 85 | return 0; |
@@ -96,7 +94,6 @@ static int fs_enet_rx_napi(struct net_device *dev, int *budget) | |||
96 | (*fep->ops->napi_clear_rx_event)(dev); | 94 | (*fep->ops->napi_clear_rx_event)(dev); |
97 | 95 | ||
98 | while (((sc = CBDR_SC(bdp)) & BD_ENET_RX_EMPTY) == 0) { | 96 | while (((sc = CBDR_SC(bdp)) & BD_ENET_RX_EMPTY) == 0) { |
99 | |||
100 | curidx = bdp - fep->rx_bd_base; | 97 | curidx = bdp - fep->rx_bd_base; |
101 | 98 | ||
102 | /* | 99 | /* |
@@ -136,11 +133,6 @@ static int fs_enet_rx_napi(struct net_device *dev, int *budget) | |||
136 | skbn = skb; | 133 | skbn = skb; |
137 | 134 | ||
138 | } else { | 135 | } else { |
139 | |||
140 | /* napi, got packet but no quota */ | ||
141 | if (--rx_work_limit < 0) | ||
142 | break; | ||
143 | |||
144 | skb = fep->rx_skbuff[curidx]; | 136 | skb = fep->rx_skbuff[curidx]; |
145 | 137 | ||
146 | dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp), | 138 | dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp), |
@@ -199,22 +191,19 @@ static int fs_enet_rx_napi(struct net_device *dev, int *budget) | |||
199 | bdp = fep->rx_bd_base; | 191 | bdp = fep->rx_bd_base; |
200 | 192 | ||
201 | (*fep->ops->rx_bd_done)(dev); | 193 | (*fep->ops->rx_bd_done)(dev); |
194 | |||
195 | if (received >= budget) | ||
196 | break; | ||
202 | } | 197 | } |
203 | 198 | ||
204 | fep->cur_rx = bdp; | 199 | fep->cur_rx = bdp; |
205 | 200 | ||
206 | dev->quota -= received; | 201 | if (received < budget) { |
207 | *budget -= received; | 202 | /* done */ |
208 | 203 | netif_rx_complete(dev, napi); | |
209 | if (rx_work_limit < 0) | 204 | (*fep->ops->napi_enable_rx)(dev); |
210 | return 1; /* not done */ | 205 | } |
211 | 206 | return received; | |
212 | /* done */ | ||
213 | netif_rx_complete(dev); | ||
214 | |||
215 | (*fep->ops->napi_enable_rx)(dev); | ||
216 | |||
217 | return 0; | ||
218 | } | 207 | } |
219 | 208 | ||
220 | /* non NAPI receive function */ | 209 | /* non NAPI receive function */ |
@@ -470,7 +459,7 @@ fs_enet_interrupt(int irq, void *dev_id) | |||
470 | if (!fpi->use_napi) | 459 | if (!fpi->use_napi) |
471 | fs_enet_rx_non_napi(dev); | 460 | fs_enet_rx_non_napi(dev); |
472 | else { | 461 | else { |
473 | napi_ok = netif_rx_schedule_prep(dev); | 462 | napi_ok = napi_schedule_prep(&fep->napi); |
474 | 463 | ||
475 | (*fep->ops->napi_disable_rx)(dev); | 464 | (*fep->ops->napi_disable_rx)(dev); |
476 | (*fep->ops->clear_int_events)(dev, fep->ev_napi_rx); | 465 | (*fep->ops->clear_int_events)(dev, fep->ev_napi_rx); |
@@ -478,7 +467,7 @@ fs_enet_interrupt(int irq, void *dev_id) | |||
478 | /* NOTE: it is possible for FCCs in NAPI mode */ | 467 | /* NOTE: it is possible for FCCs in NAPI mode */ |
479 | /* to submit a spurious interrupt while in poll */ | 468 | /* to submit a spurious interrupt while in poll */ |
480 | if (napi_ok) | 469 | if (napi_ok) |
481 | __netif_rx_schedule(dev); | 470 | __netif_rx_schedule(dev, &fep->napi); |
482 | } | 471 | } |
483 | } | 472 | } |
484 | 473 | ||
@@ -799,18 +788,22 @@ static int fs_enet_open(struct net_device *dev) | |||
799 | int r; | 788 | int r; |
800 | int err; | 789 | int err; |
801 | 790 | ||
791 | napi_enable(&fep->napi); | ||
792 | |||
802 | /* Install our interrupt handler. */ | 793 | /* Install our interrupt handler. */ |
803 | r = fs_request_irq(dev, fep->interrupt, "fs_enet-mac", fs_enet_interrupt); | 794 | r = fs_request_irq(dev, fep->interrupt, "fs_enet-mac", fs_enet_interrupt); |
804 | if (r != 0) { | 795 | if (r != 0) { |
805 | printk(KERN_ERR DRV_MODULE_NAME | 796 | printk(KERN_ERR DRV_MODULE_NAME |
806 | ": %s Could not allocate FS_ENET IRQ!", dev->name); | 797 | ": %s Could not allocate FS_ENET IRQ!", dev->name); |
798 | napi_disable(&fep->napi); | ||
807 | return -EINVAL; | 799 | return -EINVAL; |
808 | } | 800 | } |
809 | 801 | ||
810 | err = fs_init_phy(dev); | 802 | err = fs_init_phy(dev); |
811 | if(err) | 803 | if(err) { |
804 | napi_disable(&fep->napi); | ||
812 | return err; | 805 | return err; |
813 | 806 | } | |
814 | phy_start(fep->phydev); | 807 | phy_start(fep->phydev); |
815 | 808 | ||
816 | return 0; | 809 | return 0; |
@@ -823,6 +816,7 @@ static int fs_enet_close(struct net_device *dev) | |||
823 | 816 | ||
824 | netif_stop_queue(dev); | 817 | netif_stop_queue(dev); |
825 | netif_carrier_off(dev); | 818 | netif_carrier_off(dev); |
819 | napi_disable(&fep->napi); | ||
826 | phy_stop(fep->phydev); | 820 | phy_stop(fep->phydev); |
827 | 821 | ||
828 | spin_lock_irqsave(&fep->lock, flags); | 822 | spin_lock_irqsave(&fep->lock, flags); |
@@ -1047,10 +1041,9 @@ static struct net_device *fs_init_instance(struct device *dev, | |||
1047 | ndev->stop = fs_enet_close; | 1041 | ndev->stop = fs_enet_close; |
1048 | ndev->get_stats = fs_enet_get_stats; | 1042 | ndev->get_stats = fs_enet_get_stats; |
1049 | ndev->set_multicast_list = fs_set_multicast_list; | 1043 | ndev->set_multicast_list = fs_set_multicast_list; |
1050 | if (fpi->use_napi) { | 1044 | netif_napi_add(ndev, &fep->napi, |
1051 | ndev->poll = fs_enet_rx_napi; | 1045 | fs_enet_rx_napi, fpi->napi_weight); |
1052 | ndev->weight = fpi->napi_weight; | 1046 | |
1053 | } | ||
1054 | ndev->ethtool_ops = &fs_ethtool_ops; | 1047 | ndev->ethtool_ops = &fs_ethtool_ops; |
1055 | ndev->do_ioctl = fs_ioctl; | 1048 | ndev->do_ioctl = fs_ioctl; |
1056 | 1049 | ||
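fs_enet shows the open/close discipline every driver in this patch adopts: napi_enable() before anything that can fire the interrupt, napi_disable() on each failing exit from open(), and napi_disable() in close() before the hardware is quiesced (it blocks until an in-flight poll finishes). Condensed into the same hypothetical driver:

static int foo_open(struct net_device *dev)
{
	struct foo_private *fp = netdev_priv(dev);
	int err;

	napi_enable(&fp->napi);

	err = request_irq(dev->irq, foo_interrupt, IRQF_SHARED, dev->name, dev);
	if (err) {
		napi_disable(&fp->napi);	/* undo on every error path */
		return err;
	}

	netif_start_queue(dev);
	return 0;
}

static int foo_close(struct net_device *dev)
{
	struct foo_private *fp = netdev_priv(dev);

	netif_stop_queue(dev);
	napi_disable(&fp->napi);		/* waits out a concurrent poll */
	free_irq(dev->irq, dev);
	return 0;
}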
diff --git a/drivers/net/fs_enet/fs_enet.h b/drivers/net/fs_enet/fs_enet.h index 569be225cd05..46d0606b1439 100644 --- a/drivers/net/fs_enet/fs_enet.h +++ b/drivers/net/fs_enet/fs_enet.h | |||
@@ -121,6 +121,7 @@ struct fs_enet_mii_bus { | |||
121 | }; | 121 | }; |
122 | 122 | ||
123 | struct fs_enet_private { | 123 | struct fs_enet_private { |
124 | struct napi_struct napi; | ||
124 | struct device *dev; /* pointer back to the device (must be initialized first) */ | 125 | struct device *dev; /* pointer back to the device (must be initialized first) */ |
125 | spinlock_t lock; /* during all ops except TX pckt processing */ | 126 | spinlock_t lock; /* during all ops except TX pckt processing */ |
126 | spinlock_t tx_lock; /* during fs_start_xmit and fs_tx */ | 127 | spinlock_t tx_lock; /* during fs_start_xmit and fs_tx */ |
diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c index f92690555dd9..bd2de325bbdd 100644 --- a/drivers/net/gianfar.c +++ b/drivers/net/gianfar.c | |||
@@ -134,7 +134,7 @@ static void gfar_configure_serdes(struct net_device *dev); | |||
134 | extern int gfar_local_mdio_write(struct gfar_mii *regs, int mii_id, int regnum, u16 value); | 134 | extern int gfar_local_mdio_write(struct gfar_mii *regs, int mii_id, int regnum, u16 value); |
135 | extern int gfar_local_mdio_read(struct gfar_mii *regs, int mii_id, int regnum); | 135 | extern int gfar_local_mdio_read(struct gfar_mii *regs, int mii_id, int regnum); |
136 | #ifdef CONFIG_GFAR_NAPI | 136 | #ifdef CONFIG_GFAR_NAPI |
137 | static int gfar_poll(struct net_device *dev, int *budget); | 137 | static int gfar_poll(struct napi_struct *napi, int budget); |
138 | #endif | 138 | #endif |
139 | #ifdef CONFIG_NET_POLL_CONTROLLER | 139 | #ifdef CONFIG_NET_POLL_CONTROLLER |
140 | static void gfar_netpoll(struct net_device *dev); | 140 | static void gfar_netpoll(struct net_device *dev); |
@@ -188,6 +188,7 @@ static int gfar_probe(struct platform_device *pdev) | |||
188 | return -ENOMEM; | 188 | return -ENOMEM; |
189 | 189 | ||
190 | priv = netdev_priv(dev); | 190 | priv = netdev_priv(dev); |
191 | priv->dev = dev; | ||
191 | 192 | ||
192 | /* Set the info in the priv to the current info */ | 193 | /* Set the info in the priv to the current info */ |
193 | priv->einfo = einfo; | 194 | priv->einfo = einfo; |
@@ -261,10 +262,7 @@ static int gfar_probe(struct platform_device *pdev) | |||
261 | dev->hard_start_xmit = gfar_start_xmit; | 262 | dev->hard_start_xmit = gfar_start_xmit; |
262 | dev->tx_timeout = gfar_timeout; | 263 | dev->tx_timeout = gfar_timeout; |
263 | dev->watchdog_timeo = TX_TIMEOUT; | 264 | dev->watchdog_timeo = TX_TIMEOUT; |
264 | #ifdef CONFIG_GFAR_NAPI | 265 | netif_napi_add(dev, &priv->napi, gfar_poll, GFAR_DEV_WEIGHT); |
265 | dev->poll = gfar_poll; | ||
266 | dev->weight = GFAR_DEV_WEIGHT; | ||
267 | #endif | ||
268 | #ifdef CONFIG_NET_POLL_CONTROLLER | 266 | #ifdef CONFIG_NET_POLL_CONTROLLER |
269 | dev->poll_controller = gfar_netpoll; | 267 | dev->poll_controller = gfar_netpoll; |
270 | #endif | 268 | #endif |
@@ -939,6 +937,8 @@ static int gfar_enet_open(struct net_device *dev) | |||
939 | { | 937 | { |
940 | int err; | 938 | int err; |
939 | struct gfar_private *priv = netdev_priv(dev); | ||
941 | 939 | ||
940 | napi_enable(&priv->napi); | ||
941 | |||
942 | /* Initialize a bunch of registers */ | 942 | /* Initialize a bunch of registers */ |
943 | init_registers(dev); | 943 | init_registers(dev); |
944 | 944 | ||
@@ -946,10 +946,14 @@ static int gfar_enet_open(struct net_device *dev) | |||
946 | 946 | ||
947 | err = init_phy(dev); | 947 | err = init_phy(dev); |
948 | 948 | ||
949 | if(err) | 949 | if(err) { |
950 | napi_disable(&priv->napi); | ||
950 | return err; | 951 | return err; |
952 | } | ||
951 | 953 | ||
952 | err = startup_gfar(dev); | 954 | err = startup_gfar(dev); |
955 | if (err) | ||
956 | napi_disable(&priv->napi); | ||
953 | 957 | ||
954 | netif_start_queue(dev); | 958 | netif_start_queue(dev); |
955 | 959 | ||
@@ -1102,6 +1106,9 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
1102 | static int gfar_close(struct net_device *dev) | 1106 | static int gfar_close(struct net_device *dev) |
1103 | { | 1107 | { |
1104 | struct gfar_private *priv = netdev_priv(dev); | 1108 | struct gfar_private *priv = netdev_priv(dev); |
1109 | |||
1110 | napi_disable(&priv->napi); | ||
1111 | |||
1105 | stop_gfar(dev); | 1112 | stop_gfar(dev); |
1106 | 1113 | ||
1107 | /* Disconnect from the PHY */ | 1114 | /* Disconnect from the PHY */ |
@@ -1318,7 +1325,7 @@ struct sk_buff * gfar_new_skb(struct net_device *dev, struct rxbd8 *bdp) | |||
1318 | return NULL; | 1325 | return NULL; |
1319 | 1326 | ||
1320 | alignamount = RXBUF_ALIGNMENT - | 1327 | alignamount = RXBUF_ALIGNMENT - |
1321 | (((unsigned) skb->data) & (RXBUF_ALIGNMENT - 1)); | 1328 | (((unsigned long) skb->data) & (RXBUF_ALIGNMENT - 1)); |
1322 | 1329 | ||
1323 | /* We need the data buffer to be aligned properly. We will reserve | 1330 | /* We need the data buffer to be aligned properly. We will reserve |
1324 | * as many bytes as needed to align the data properly | 1331 | * as many bytes as needed to align the data properly |
@@ -1390,12 +1397,12 @@ irqreturn_t gfar_receive(int irq, void *dev_id) | |||
1390 | 1397 | ||
1391 | /* support NAPI */ | 1398 | /* support NAPI */ |
1392 | #ifdef CONFIG_GFAR_NAPI | 1399 | #ifdef CONFIG_GFAR_NAPI |
1393 | if (netif_rx_schedule_prep(dev)) { | 1400 | if (netif_rx_schedule_prep(dev, &priv->napi)) { |
1394 | tempval = gfar_read(&priv->regs->imask); | 1401 | tempval = gfar_read(&priv->regs->imask); |
1395 | tempval &= IMASK_RX_DISABLED; | 1402 | tempval &= IMASK_RX_DISABLED; |
1396 | gfar_write(&priv->regs->imask, tempval); | 1403 | gfar_write(&priv->regs->imask, tempval); |
1397 | 1404 | ||
1398 | __netif_rx_schedule(dev); | 1405 | __netif_rx_schedule(dev, &priv->napi); |
1399 | } else { | 1406 | } else { |
1400 | if (netif_msg_rx_err(priv)) | 1407 | if (netif_msg_rx_err(priv)) |
1401 | printk(KERN_DEBUG "%s: receive called twice (%x)[%x]\n", | 1408 | printk(KERN_DEBUG "%s: receive called twice (%x)[%x]\n", |
@@ -1569,23 +1576,16 @@ int gfar_clean_rx_ring(struct net_device *dev, int rx_work_limit) | |||
1569 | } | 1576 | } |
1570 | 1577 | ||
1571 | #ifdef CONFIG_GFAR_NAPI | 1578 | #ifdef CONFIG_GFAR_NAPI |
1572 | static int gfar_poll(struct net_device *dev, int *budget) | 1579 | static int gfar_poll(struct napi_struct *napi, int budget) |
1573 | { | 1580 | { |
1581 | struct gfar_private *priv = container_of(napi, struct gfar_private, napi); | ||
1582 | struct net_device *dev = priv->dev; | ||
1574 | int howmany; | 1583 | int howmany; |
1575 | struct gfar_private *priv = netdev_priv(dev); | ||
1576 | int rx_work_limit = *budget; | ||
1577 | |||
1578 | if (rx_work_limit > dev->quota) | ||
1579 | rx_work_limit = dev->quota; | ||
1580 | 1584 | ||
1581 | howmany = gfar_clean_rx_ring(dev, rx_work_limit); | 1585 | howmany = gfar_clean_rx_ring(dev, budget); |
1582 | 1586 | ||
1583 | dev->quota -= howmany; | 1587 | if (howmany < budget) { |
1584 | rx_work_limit -= howmany; | 1588 | netif_rx_complete(dev, napi); |
1585 | *budget -= howmany; | ||
1586 | |||
1587 | if (rx_work_limit > 0) { | ||
1588 | netif_rx_complete(dev); | ||
1589 | 1589 | ||
1590 | /* Clear the halt bit in RSTAT */ | 1590 | /* Clear the halt bit in RSTAT */ |
1591 | gfar_write(&priv->regs->rstat, RSTAT_CLEAR_RHALT); | 1591 | gfar_write(&priv->regs->rstat, RSTAT_CLEAR_RHALT); |
@@ -1601,8 +1601,7 @@ static int gfar_poll(struct net_device *dev, int *budget) | |||
1601 | gfar_write(&priv->regs->rxic, 0); | 1601 | gfar_write(&priv->regs->rxic, 0); |
1602 | } | 1602 | } |
1603 | 1603 | ||
1604 | /* Return 1 if there's more work to do */ | 1604 | return howmany; |
1605 | return (rx_work_limit > 0) ? 0 : 1; | ||
1606 | } | 1605 | } |
1607 | #endif | 1606 | #endif |
1608 | 1607 | ||
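In gianfar the old min(dev->quota, *budget) computation collapses into the single budget argument, which gfar_poll() hands straight to the ring cleaner; note also that netif_napi_add() is now called unconditionally while gfar_poll() itself is still guarded by CONFIG_GFAR_NAPI. The cleaner's contract, sketched with the same hypothetical names:

static int foo_clean_rx_ring(struct net_device *dev, int rx_work_limit)
{
	struct foo_private *fp = netdev_priv(dev);
	int howmany = 0;

	/* stop at the limit even if descriptors remain; the caller sees
	 * howmany == rx_work_limit and stays in polled mode */
	while (howmany < rx_work_limit && foo_rx_desc_ready(fp)) {	/* assumed test */
		foo_process_one_frame(fp);	/* assumed per-descriptor work */
		howmany++;
	}
	return howmany;
}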
diff --git a/drivers/net/gianfar.h b/drivers/net/gianfar.h index d8e779c102fa..b8714e00482d 100644 --- a/drivers/net/gianfar.h +++ b/drivers/net/gianfar.h | |||
@@ -691,6 +691,9 @@ struct gfar_private { | |||
691 | /* RX Locked fields */ | 691 | /* RX Locked fields */ |
692 | spinlock_t rxlock; | 692 | spinlock_t rxlock; |
693 | 693 | ||
694 | struct net_device *dev; | ||
695 | struct napi_struct napi; | ||
696 | |||
694 | /* skb array and index */ | 697 | /* skb array and index */ |
695 | struct sk_buff ** rx_skbuff; | 698 | struct sk_buff ** rx_skbuff; |
696 | u16 skb_currx; | 699 | u16 skb_currx; |
diff --git a/drivers/net/ibmveth.c b/drivers/net/ibmveth.c index acba90f1638e..78e28ada1e21 100644 --- a/drivers/net/ibmveth.c +++ b/drivers/net/ibmveth.c | |||
@@ -83,7 +83,7 @@ | |||
83 | static int ibmveth_open(struct net_device *dev); | 83 | static int ibmveth_open(struct net_device *dev); |
84 | static int ibmveth_close(struct net_device *dev); | 84 | static int ibmveth_close(struct net_device *dev); |
85 | static int ibmveth_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd); | 85 | static int ibmveth_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd); |
86 | static int ibmveth_poll(struct net_device *dev, int *budget); | 86 | static int ibmveth_poll(struct napi_struct *napi, int budget); |
87 | static int ibmveth_start_xmit(struct sk_buff *skb, struct net_device *dev); | 87 | static int ibmveth_start_xmit(struct sk_buff *skb, struct net_device *dev); |
88 | static struct net_device_stats *ibmveth_get_stats(struct net_device *dev); | 88 | static struct net_device_stats *ibmveth_get_stats(struct net_device *dev); |
89 | static void ibmveth_set_multicast_list(struct net_device *dev); | 89 | static void ibmveth_set_multicast_list(struct net_device *dev); |
@@ -480,6 +480,8 @@ static int ibmveth_open(struct net_device *netdev) | |||
480 | 480 | ||
481 | ibmveth_debug_printk("open starting\n"); | 481 | ibmveth_debug_printk("open starting\n"); |
482 | 482 | ||
483 | napi_enable(&adapter->napi); | ||
484 | |||
483 | for(i = 0; i<IbmVethNumBufferPools; i++) | 485 | for(i = 0; i<IbmVethNumBufferPools; i++) |
484 | rxq_entries += adapter->rx_buff_pool[i].size; | 486 | rxq_entries += adapter->rx_buff_pool[i].size; |
485 | 487 | ||
@@ -489,6 +491,7 @@ static int ibmveth_open(struct net_device *netdev) | |||
489 | if(!adapter->buffer_list_addr || !adapter->filter_list_addr) { | 491 | if(!adapter->buffer_list_addr || !adapter->filter_list_addr) { |
490 | ibmveth_error_printk("unable to allocate filter or buffer list pages\n"); | 492 | ibmveth_error_printk("unable to allocate filter or buffer list pages\n"); |
491 | ibmveth_cleanup(adapter); | 493 | ibmveth_cleanup(adapter); |
494 | napi_disable(&adapter->napi); | ||
492 | return -ENOMEM; | 495 | return -ENOMEM; |
493 | } | 496 | } |
494 | 497 | ||
@@ -498,6 +501,7 @@ static int ibmveth_open(struct net_device *netdev) | |||
498 | if(!adapter->rx_queue.queue_addr) { | 501 | if(!adapter->rx_queue.queue_addr) { |
499 | ibmveth_error_printk("unable to allocate rx queue pages\n"); | 502 | ibmveth_error_printk("unable to allocate rx queue pages\n"); |
500 | ibmveth_cleanup(adapter); | 503 | ibmveth_cleanup(adapter); |
504 | napi_disable(&adapter->napi); | ||
501 | return -ENOMEM; | 505 | return -ENOMEM; |
502 | } | 506 | } |
503 | 507 | ||
@@ -514,6 +518,7 @@ static int ibmveth_open(struct net_device *netdev) | |||
514 | (dma_mapping_error(adapter->rx_queue.queue_dma))) { | 518 | (dma_mapping_error(adapter->rx_queue.queue_dma))) { |
515 | ibmveth_error_printk("unable to map filter or buffer list pages\n"); | 519 | ibmveth_error_printk("unable to map filter or buffer list pages\n"); |
516 | ibmveth_cleanup(adapter); | 520 | ibmveth_cleanup(adapter); |
521 | napi_disable(&adapter->napi); | ||
517 | return -ENOMEM; | 522 | return -ENOMEM; |
518 | } | 523 | } |
519 | 524 | ||
@@ -545,6 +550,7 @@ static int ibmveth_open(struct net_device *netdev) | |||
545 | rxq_desc.desc, | 550 | rxq_desc.desc, |
546 | mac_address); | 551 | mac_address); |
547 | ibmveth_cleanup(adapter); | 552 | ibmveth_cleanup(adapter); |
553 | napi_disable(&adapter->napi); | ||
548 | return -ENONET; | 554 | return -ENONET; |
549 | } | 555 | } |
550 | 556 | ||
@@ -555,6 +561,7 @@ static int ibmveth_open(struct net_device *netdev) | |||
555 | ibmveth_error_printk("unable to alloc pool\n"); | 561 | ibmveth_error_printk("unable to alloc pool\n"); |
556 | adapter->rx_buff_pool[i].active = 0; | 562 | adapter->rx_buff_pool[i].active = 0; |
557 | ibmveth_cleanup(adapter); | 563 | ibmveth_cleanup(adapter); |
564 | napi_disable(&adapter->napi); | ||
558 | return -ENOMEM ; | 565 | return -ENOMEM ; |
559 | } | 566 | } |
560 | } | 567 | } |
@@ -567,6 +574,7 @@ static int ibmveth_open(struct net_device *netdev) | |||
567 | } while (H_IS_LONG_BUSY(rc) || (rc == H_BUSY)); | 574 | } while (H_IS_LONG_BUSY(rc) || (rc == H_BUSY)); |
568 | 575 | ||
569 | ibmveth_cleanup(adapter); | 576 | ibmveth_cleanup(adapter); |
577 | napi_disable(&adapter->napi); | ||
570 | return rc; | 578 | return rc; |
571 | } | 579 | } |
572 | 580 | ||
@@ -587,6 +595,8 @@ static int ibmveth_close(struct net_device *netdev) | |||
587 | 595 | ||
588 | ibmveth_debug_printk("close starting\n"); | 596 | ibmveth_debug_printk("close starting\n"); |
589 | 597 | ||
598 | napi_disable(&adapter->napi); | ||
599 | |||
590 | if (!adapter->pool_config) | 600 | if (!adapter->pool_config) |
591 | netif_stop_queue(netdev); | 601 | netif_stop_queue(netdev); |
592 | 602 | ||
@@ -767,80 +777,68 @@ out: spin_lock_irqsave(&adapter->stats_lock, flags); | |||
767 | return 0; | 777 | return 0; |
768 | } | 778 | } |
769 | 779 | ||
770 | static int ibmveth_poll(struct net_device *netdev, int *budget) | 780 | static int ibmveth_poll(struct napi_struct *napi, int budget) |
771 | { | 781 | { |
772 | struct ibmveth_adapter *adapter = netdev->priv; | 782 | struct ibmveth_adapter *adapter = container_of(napi, struct ibmveth_adapter, napi); |
773 | int max_frames_to_process = netdev->quota; | 783 | struct net_device *netdev = adapter->netdev; |
774 | int frames_processed = 0; | 784 | int frames_processed = 0; |
775 | int more_work = 1; | ||
776 | unsigned long lpar_rc; | 785 | unsigned long lpar_rc; |
777 | 786 | ||
778 | restart_poll: | 787 | restart_poll: |
779 | do { | 788 | do { |
780 | struct net_device *netdev = adapter->netdev; | 789 | struct sk_buff *skb; |
781 | |||
782 | if(ibmveth_rxq_pending_buffer(adapter)) { | ||
783 | struct sk_buff *skb; | ||
784 | 790 | ||
785 | rmb(); | 791 | if (!ibmveth_rxq_pending_buffer(adapter)) |
792 | break; | ||
786 | 793 | ||
787 | if(!ibmveth_rxq_buffer_valid(adapter)) { | 794 | rmb(); |
788 | wmb(); /* suggested by larson1 */ | 795 | if (!ibmveth_rxq_buffer_valid(adapter)) { |
789 | adapter->rx_invalid_buffer++; | 796 | wmb(); /* suggested by larson1 */ |
790 | ibmveth_debug_printk("recycling invalid buffer\n"); | 797 | adapter->rx_invalid_buffer++; |
791 | ibmveth_rxq_recycle_buffer(adapter); | 798 | ibmveth_debug_printk("recycling invalid buffer\n"); |
792 | } else { | 799 | ibmveth_rxq_recycle_buffer(adapter); |
793 | int length = ibmveth_rxq_frame_length(adapter); | 800 | } else { |
794 | int offset = ibmveth_rxq_frame_offset(adapter); | 801 | int length = ibmveth_rxq_frame_length(adapter); |
795 | skb = ibmveth_rxq_get_buffer(adapter); | 802 | int offset = ibmveth_rxq_frame_offset(adapter); |
803 | skb = ibmveth_rxq_get_buffer(adapter); | ||
796 | 804 | ||
797 | ibmveth_rxq_harvest_buffer(adapter); | 805 | ibmveth_rxq_harvest_buffer(adapter); |
798 | 806 | ||
799 | skb_reserve(skb, offset); | 807 | skb_reserve(skb, offset); |
800 | skb_put(skb, length); | 808 | skb_put(skb, length); |
801 | skb->protocol = eth_type_trans(skb, netdev); | 809 | skb->protocol = eth_type_trans(skb, netdev); |
802 | 810 | ||
803 | netif_receive_skb(skb); /* send it up */ | 811 | netif_receive_skb(skb); /* send it up */ |
804 | 812 | ||
805 | adapter->stats.rx_packets++; | 813 | adapter->stats.rx_packets++; |
806 | adapter->stats.rx_bytes += length; | 814 | adapter->stats.rx_bytes += length; |
807 | frames_processed++; | 815 | frames_processed++; |
808 | netdev->last_rx = jiffies; | 816 | netdev->last_rx = jiffies; |
809 | } | ||
810 | } else { | ||
811 | more_work = 0; | ||
812 | } | 817 | } |
813 | } while(more_work && (frames_processed < max_frames_to_process)); | 818 | } while (frames_processed < budget); |
814 | 819 | ||
815 | ibmveth_replenish_task(adapter); | 820 | ibmveth_replenish_task(adapter); |
816 | 821 | ||
817 | if(more_work) { | 822 | if (frames_processed < budget) { |
818 | /* more work to do - return that we are not done yet */ | 823 | /* We think we are done - reenable interrupts, |
819 | netdev->quota -= frames_processed; | 824 | * then check once more to make sure we are done. |
820 | *budget -= frames_processed; | 825 | */ |
821 | return 1; | 826 | lpar_rc = h_vio_signal(adapter->vdev->unit_address, |
822 | } | 827 | VIO_IRQ_ENABLE); |
823 | |||
824 | /* we think we are done - reenable interrupts, then check once more to make sure we are done */ | ||
825 | lpar_rc = h_vio_signal(adapter->vdev->unit_address, VIO_IRQ_ENABLE); | ||
826 | 828 | ||
827 | ibmveth_assert(lpar_rc == H_SUCCESS); | 829 | ibmveth_assert(lpar_rc == H_SUCCESS); |
828 | 830 | ||
829 | netif_rx_complete(netdev); | 831 | netif_rx_complete(netdev, napi); |
830 | 832 | ||
831 | if(ibmveth_rxq_pending_buffer(adapter) && netif_rx_reschedule(netdev, frames_processed)) | 833 | if (ibmveth_rxq_pending_buffer(adapter) && |
832 | { | 834 | netif_rx_reschedule(netdev, napi)) { |
833 | lpar_rc = h_vio_signal(adapter->vdev->unit_address, VIO_IRQ_DISABLE); | 835 | lpar_rc = h_vio_signal(adapter->vdev->unit_address, |
834 | ibmveth_assert(lpar_rc == H_SUCCESS); | 836 | VIO_IRQ_DISABLE); |
835 | more_work = 1; | 837 | goto restart_poll; |
836 | goto restart_poll; | 838 | } |
837 | } | 839 | } |
838 | 840 | ||
839 | netdev->quota -= frames_processed; | 841 | return frames_processed; |
840 | *budget -= frames_processed; | ||
841 | |||
842 | /* we really are done */ | ||
843 | return 0; | ||
844 | } | 842 | } |
845 | 843 | ||
846 | static irqreturn_t ibmveth_interrupt(int irq, void *dev_instance) | 844 | static irqreturn_t ibmveth_interrupt(int irq, void *dev_instance) |
@@ -849,10 +847,11 @@ static irqreturn_t ibmveth_interrupt(int irq, void *dev_instance) | |||
849 | struct ibmveth_adapter *adapter = netdev->priv; | 847 | struct ibmveth_adapter *adapter = netdev->priv; |
850 | unsigned long lpar_rc; | 848 | unsigned long lpar_rc; |
851 | 849 | ||
852 | if(netif_rx_schedule_prep(netdev)) { | 850 | if (netif_rx_schedule_prep(netdev, &adapter->napi)) { |
853 | lpar_rc = h_vio_signal(adapter->vdev->unit_address, VIO_IRQ_DISABLE); | 851 | lpar_rc = h_vio_signal(adapter->vdev->unit_address, |
852 | VIO_IRQ_DISABLE); | ||
854 | ibmveth_assert(lpar_rc == H_SUCCESS); | 853 | ibmveth_assert(lpar_rc == H_SUCCESS); |
855 | __netif_rx_schedule(netdev); | 854 | __netif_rx_schedule(netdev, &adapter->napi); |
856 | } | 855 | } |
857 | return IRQ_HANDLED; | 856 | return IRQ_HANDLED; |
858 | } | 857 | } |
@@ -1004,6 +1003,8 @@ static int __devinit ibmveth_probe(struct vio_dev *dev, const struct vio_device_ | |||
1004 | adapter->mcastFilterSize= *mcastFilterSize_p; | 1003 | adapter->mcastFilterSize= *mcastFilterSize_p; |
1005 | adapter->pool_config = 0; | 1004 | adapter->pool_config = 0; |
1006 | 1005 | ||
1006 | netif_napi_add(netdev, &adapter->napi, ibmveth_poll, 16); | ||
1007 | |||
1007 | /* Some older boxes running PHYP non-natively have an OF that | 1008 | /* Some older boxes running PHYP non-natively have an OF that |
1008 | returns a 8-byte local-mac-address field (and the first | 1009 | returns a 8-byte local-mac-address field (and the first |
1009 | 2 bytes have to be ignored) while newer boxes' OF return | 1010 | 2 bytes have to be ignored) while newer boxes' OF return |
@@ -1020,8 +1021,6 @@ static int __devinit ibmveth_probe(struct vio_dev *dev, const struct vio_device_ | |||
1020 | 1021 | ||
1021 | netdev->irq = dev->irq; | 1022 | netdev->irq = dev->irq; |
1022 | netdev->open = ibmveth_open; | 1023 | netdev->open = ibmveth_open; |
1023 | netdev->poll = ibmveth_poll; | ||
1024 | netdev->weight = 16; | ||
1025 | netdev->stop = ibmveth_close; | 1024 | netdev->stop = ibmveth_close; |
1026 | netdev->hard_start_xmit = ibmveth_start_xmit; | 1025 | netdev->hard_start_xmit = ibmveth_start_xmit; |
1027 | netdev->get_stats = ibmveth_get_stats; | 1026 | netdev->get_stats = ibmveth_get_stats; |
diff --git a/drivers/net/ibmveth.h b/drivers/net/ibmveth.h index 72cc15a6cab7..e05694126f85 100644 --- a/drivers/net/ibmveth.h +++ b/drivers/net/ibmveth.h | |||
@@ -112,6 +112,7 @@ struct ibmveth_rx_q { | |||
112 | struct ibmveth_adapter { | 112 | struct ibmveth_adapter { |
113 | struct vio_dev *vdev; | 113 | struct vio_dev *vdev; |
114 | struct net_device *netdev; | 114 | struct net_device *netdev; |
115 | struct napi_struct napi; | ||
115 | struct net_device_stats stats; | 116 | struct net_device_stats stats; |
116 | unsigned int mcastFilterSize; | 117 | unsigned int mcastFilterSize; |
117 | unsigned long mac_addr; | 118 | unsigned long mac_addr; |
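The ibmveth rewrite keeps its race-closing idiom under the new API: when under budget it re-enables the virtual interrupt, completes, then re-checks the queue and uses netif_rx_reschedule() to jump back into the loop if a frame slipped in before the complete. The skeleton, again with hypothetical helpers:

static int foo_poll(struct napi_struct *napi, int budget)
{
	struct foo_private *fp = container_of(napi, struct foo_private, napi);
	struct net_device *dev = fp->dev;
	int done = 0;

restart:
	while (done < budget && foo_rx_pending(fp)) {	/* assumed queue test */
		foo_receive_one(fp);			/* assumed per-frame handler */
		done++;
	}

	if (done < budget) {
		foo_enable_rx_irq(fp);
		netif_rx_complete(dev, napi);

		/* a frame may have arrived between the last check and the
		 * complete; if so, take the poll back and keep going */
		if (foo_rx_pending(fp) && netif_rx_reschedule(dev, napi)) {
			foo_disable_rx_irq(fp);
			goto restart;
		}
	}
	return done;
}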
diff --git a/drivers/net/ixgb/ixgb.h b/drivers/net/ixgb/ixgb.h index 3569d5b03388..1eee8894c732 100644 --- a/drivers/net/ixgb/ixgb.h +++ b/drivers/net/ixgb/ixgb.h | |||
@@ -184,6 +184,7 @@ struct ixgb_adapter { | |||
184 | boolean_t rx_csum; | 184 | boolean_t rx_csum; |
185 | 185 | ||
186 | /* OS defined structs */ | 186 | /* OS defined structs */ |
187 | struct napi_struct napi; | ||
187 | struct net_device *netdev; | 188 | struct net_device *netdev; |
188 | struct pci_dev *pdev; | 189 | struct pci_dev *pdev; |
189 | struct net_device_stats net_stats; | 190 | struct net_device_stats net_stats; |
diff --git a/drivers/net/ixgb/ixgb_main.c b/drivers/net/ixgb/ixgb_main.c index 991c8833e23c..e3f27c67fb28 100644 --- a/drivers/net/ixgb/ixgb_main.c +++ b/drivers/net/ixgb/ixgb_main.c | |||
@@ -97,7 +97,7 @@ static irqreturn_t ixgb_intr(int irq, void *data); | |||
97 | static boolean_t ixgb_clean_tx_irq(struct ixgb_adapter *adapter); | 97 | static boolean_t ixgb_clean_tx_irq(struct ixgb_adapter *adapter); |
98 | 98 | ||
99 | #ifdef CONFIG_IXGB_NAPI | 99 | #ifdef CONFIG_IXGB_NAPI |
100 | static int ixgb_clean(struct net_device *netdev, int *budget); | 100 | static int ixgb_clean(struct napi_struct *napi, int budget); |
101 | static boolean_t ixgb_clean_rx_irq(struct ixgb_adapter *adapter, | 101 | static boolean_t ixgb_clean_rx_irq(struct ixgb_adapter *adapter, |
102 | int *work_done, int work_to_do); | 102 | int *work_done, int work_to_do); |
103 | #else | 103 | #else |
@@ -288,7 +288,7 @@ ixgb_up(struct ixgb_adapter *adapter) | |||
288 | mod_timer(&adapter->watchdog_timer, jiffies); | 288 | mod_timer(&adapter->watchdog_timer, jiffies); |
289 | 289 | ||
290 | #ifdef CONFIG_IXGB_NAPI | 290 | #ifdef CONFIG_IXGB_NAPI |
291 | netif_poll_enable(netdev); | 291 | napi_enable(&adapter->napi); |
292 | #endif | 292 | #endif |
293 | ixgb_irq_enable(adapter); | 293 | ixgb_irq_enable(adapter); |
294 | 294 | ||
@@ -309,7 +309,7 @@ ixgb_down(struct ixgb_adapter *adapter, boolean_t kill_watchdog) | |||
309 | if(kill_watchdog) | 309 | if(kill_watchdog) |
310 | del_timer_sync(&adapter->watchdog_timer); | 310 | del_timer_sync(&adapter->watchdog_timer); |
311 | #ifdef CONFIG_IXGB_NAPI | 311 | #ifdef CONFIG_IXGB_NAPI |
312 | netif_poll_disable(netdev); | 312 | napi_disable(&adapter->napi); |
313 | #endif | 313 | #endif |
314 | adapter->link_speed = 0; | 314 | adapter->link_speed = 0; |
315 | adapter->link_duplex = 0; | 315 | adapter->link_duplex = 0; |
@@ -421,8 +421,7 @@ ixgb_probe(struct pci_dev *pdev, | |||
421 | netdev->tx_timeout = &ixgb_tx_timeout; | 421 | netdev->tx_timeout = &ixgb_tx_timeout; |
422 | netdev->watchdog_timeo = 5 * HZ; | 422 | netdev->watchdog_timeo = 5 * HZ; |
423 | #ifdef CONFIG_IXGB_NAPI | 423 | #ifdef CONFIG_IXGB_NAPI |
424 | netdev->poll = &ixgb_clean; | 424 | netif_napi_add(netdev, &adapter->napi, ixgb_clean, 64); |
425 | netdev->weight = 64; | ||
426 | #endif | 425 | #endif |
427 | netdev->vlan_rx_register = ixgb_vlan_rx_register; | 426 | netdev->vlan_rx_register = ixgb_vlan_rx_register; |
428 | netdev->vlan_rx_add_vid = ixgb_vlan_rx_add_vid; | 427 | netdev->vlan_rx_add_vid = ixgb_vlan_rx_add_vid; |
@@ -1746,7 +1745,7 @@ ixgb_intr(int irq, void *data) | |||
1746 | } | 1745 | } |
1747 | 1746 | ||
1748 | #ifdef CONFIG_IXGB_NAPI | 1747 | #ifdef CONFIG_IXGB_NAPI |
1749 | if(netif_rx_schedule_prep(netdev)) { | 1748 | if (netif_rx_schedule_prep(netdev, &adapter->napi)) { |
1750 | 1749 | ||
1751 | /* Disable interrupts and register for poll. The flush | 1750 | /* Disable interrupts and register for poll. The flush |
1752 | of the posted write is intentionally left out. | 1751 | of the posted write is intentionally left out. |
@@ -1754,7 +1753,7 @@ ixgb_intr(int irq, void *data) | |||
1754 | 1753 | ||
1755 | atomic_inc(&adapter->irq_sem); | 1754 | atomic_inc(&adapter->irq_sem); |
1756 | IXGB_WRITE_REG(&adapter->hw, IMC, ~0); | 1755 | IXGB_WRITE_REG(&adapter->hw, IMC, ~0); |
1757 | __netif_rx_schedule(netdev); | 1756 | __netif_rx_schedule(netdev, &adapter->napi); |
1758 | } | 1757 | } |
1759 | #else | 1758 | #else |
1760 | /* yes, that is actually a & and it is meant to make sure that | 1759 | /* yes, that is actually a & and it is meant to make sure that |
@@ -1776,27 +1775,23 @@ ixgb_intr(int irq, void *data) | |||
1776 | **/ | 1775 | **/ |
1777 | 1776 | ||
1778 | static int | 1777 | static int |
1779 | ixgb_clean(struct net_device *netdev, int *budget) | 1778 | ixgb_clean(struct napi_struct *napi, int budget) |
1780 | { | 1779 | { |
1781 | struct ixgb_adapter *adapter = netdev_priv(netdev); | 1780 | struct ixgb_adapter *adapter = container_of(napi, struct ixgb_adapter, napi); |
1782 | int work_to_do = min(*budget, netdev->quota); | 1781 | struct net_device *netdev = adapter->netdev; |
1783 | int tx_cleaned; | 1782 | int tx_cleaned; |
1784 | int work_done = 0; | 1783 | int work_done = 0; |
1785 | 1784 | ||
1786 | tx_cleaned = ixgb_clean_tx_irq(adapter); | 1785 | tx_cleaned = ixgb_clean_tx_irq(adapter); |
1787 | ixgb_clean_rx_irq(adapter, &work_done, work_to_do); | 1786 | ixgb_clean_rx_irq(adapter, &work_done, budget); |
1788 | |||
1789 | *budget -= work_done; | ||
1790 | netdev->quota -= work_done; | ||
1791 | 1787 | ||
1792 | /* if no Tx and not enough Rx work done, exit the polling mode */ | 1788 | /* if no Tx and not enough Rx work done, exit the polling mode */ |
1793 | if((!tx_cleaned && (work_done == 0)) || !netif_running(netdev)) { | 1789 | if((!tx_cleaned && (work_done == 0)) || !netif_running(netdev)) { |
1794 | netif_rx_complete(netdev); | 1790 | netif_rx_complete(netdev, napi); |
1795 | ixgb_irq_enable(adapter); | 1791 | ixgb_irq_enable(adapter); |
1796 | return 0; | ||
1797 | } | 1792 | } |
1798 | 1793 | ||
1799 | return 1; | 1794 | return work_done; |
1800 | } | 1795 | } |
1801 | #endif | 1796 | #endif |
1802 | 1797 | ||
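ixgb splits the work differently: the RX cleaner reports progress through an int pointer while ixgb_clean() owns the budget, so work_done can never exceed budget and the return value is always a valid NAPI result. The same split in miniature, assuming a hypothetical foo_adapter that embeds netdev and napi the way ixgb_adapter does above:

static void foo_clean_rx(struct foo_adapter *adapter, int *work_done, int work_to_do)
{
	while (*work_done < work_to_do && foo_rx_pending(adapter)) {	/* assumed test */
		foo_receive_one(adapter);	/* assumed per-frame handler */
		(*work_done)++;
	}
}

static int foo_poll(struct napi_struct *napi, int budget)
{
	struct foo_adapter *adapter = container_of(napi, struct foo_adapter, napi);
	int work_done = 0;

	foo_clean_rx(adapter, &work_done, budget);

	if (work_done < budget) {
		netif_rx_complete(adapter->netdev, napi);
		foo_enable_irq(adapter);	/* assumed IRQ unmask */
	}
	return work_done;
}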
diff --git a/drivers/net/ixp2000/ixpdev.c b/drivers/net/ixp2000/ixpdev.c index d9ce1aef148a..6c0dd49149d0 100644 --- a/drivers/net/ixp2000/ixpdev.c +++ b/drivers/net/ixp2000/ixpdev.c | |||
@@ -74,9 +74,9 @@ static int ixpdev_xmit(struct sk_buff *skb, struct net_device *dev) | |||
74 | } | 74 | } |
75 | 75 | ||
76 | 76 | ||
77 | static int ixpdev_rx(struct net_device *dev, int *budget) | 77 | static int ixpdev_rx(struct net_device *dev, int processed, int budget) |
78 | { | 78 | { |
79 | while (*budget > 0) { | 79 | while (processed < budget) { |
80 | struct ixpdev_rx_desc *desc; | 80 | struct ixpdev_rx_desc *desc; |
81 | struct sk_buff *skb; | 81 | struct sk_buff *skb; |
82 | void *buf; | 82 | void *buf; |
@@ -122,29 +122,34 @@ static int ixpdev_rx(struct net_device *dev, int *budget) | |||
122 | 122 | ||
123 | err: | 123 | err: |
124 | ixp2000_reg_write(RING_RX_PENDING, _desc); | 124 | ixp2000_reg_write(RING_RX_PENDING, _desc); |
125 | dev->quota--; | 125 | processed++; |
126 | (*budget)--; | ||
127 | } | 126 | } |
128 | 127 | ||
129 | return 1; | 128 | return processed; |
130 | } | 129 | } |
131 | 130 | ||
132 | /* dev always points to nds[0]. */ | 131 | /* dev always points to nds[0]. */ |
133 | static int ixpdev_poll(struct net_device *dev, int *budget) | 132 | static int ixpdev_poll(struct napi_struct *napi, int budget) |
134 | { | 133 | { |
134 | struct ixpdev_priv *ip = container_of(napi, struct ixpdev_priv, napi); | ||
135 | struct net_device *dev = ip->dev; | ||
136 | int rx; | ||
137 | |||
135 | /* @@@ Have to stop polling when nds[0] is administratively | 138 | /* @@@ Have to stop polling when nds[0] is administratively |
136 | * downed while we are polling. */ | 139 | * downed while we are polling. */ |
140 | rx = 0; | ||
137 | do { | 141 | do { |
138 | ixp2000_reg_write(IXP2000_IRQ_THD_RAW_STATUS_A_0, 0x00ff); | 142 | ixp2000_reg_write(IXP2000_IRQ_THD_RAW_STATUS_A_0, 0x00ff); |
139 | 143 | ||
140 | if (ixpdev_rx(dev, budget)) | 144 | rx = ixpdev_rx(dev, rx, budget); |
141 | return 1; | 145 | if (rx >= budget) |
146 | break; | ||
142 | } while (ixp2000_reg_read(IXP2000_IRQ_THD_RAW_STATUS_A_0) & 0x00ff); | 147 | } while (ixp2000_reg_read(IXP2000_IRQ_THD_RAW_STATUS_A_0) & 0x00ff); |
143 | 148 | ||
144 | netif_rx_complete(dev); | 149 | netif_rx_complete(dev, napi); |
145 | ixp2000_reg_write(IXP2000_IRQ_THD_ENABLE_SET_A_0, 0x00ff); | 150 | ixp2000_reg_write(IXP2000_IRQ_THD_ENABLE_SET_A_0, 0x00ff); |
146 | 151 | ||
147 | return 0; | 152 | return rx; |
148 | } | 153 | } |
149 | 154 | ||
150 | static void ixpdev_tx_complete(void) | 155 | static void ixpdev_tx_complete(void) |
@@ -199,9 +204,12 @@ static irqreturn_t ixpdev_interrupt(int irq, void *dev_id) | |||
199 | * Any of the eight receive units signaled RX? | 204 | * Any of the eight receive units signaled RX? |
200 | */ | 205 | */ |
201 | if (status & 0x00ff) { | 206 | if (status & 0x00ff) { |
207 | struct net_device *dev = nds[0]; | ||
208 | struct ixpdev_priv *ip = netdev_priv(dev); | ||
209 | |||
202 | ixp2000_reg_wrb(IXP2000_IRQ_THD_ENABLE_CLEAR_A_0, 0x00ff); | 210 | ixp2000_reg_wrb(IXP2000_IRQ_THD_ENABLE_CLEAR_A_0, 0x00ff); |
203 | if (likely(__netif_rx_schedule_prep(nds[0]))) { | 211 | if (likely(napi_schedule_prep(&ip->napi))) { |
204 | __netif_rx_schedule(nds[0]); | 212 | __netif_rx_schedule(dev, &ip->napi); |
205 | } else { | 213 | } else { |
206 | printk(KERN_CRIT "ixp2000: irq while polling!!\n"); | 214 | printk(KERN_CRIT "ixp2000: irq while polling!!\n"); |
207 | } | 215 | } |
@@ -232,11 +240,13 @@ static int ixpdev_open(struct net_device *dev) | |||
232 | struct ixpdev_priv *ip = netdev_priv(dev); | 240 | struct ixpdev_priv *ip = netdev_priv(dev); |
233 | int err; | 241 | int err; |
234 | 242 | ||
243 | napi_enable(&ip->napi); | ||
235 | if (!nds_open++) { | 244 | if (!nds_open++) { |
236 | err = request_irq(IRQ_IXP2000_THDA0, ixpdev_interrupt, | 245 | err = request_irq(IRQ_IXP2000_THDA0, ixpdev_interrupt, |
237 | IRQF_SHARED, "ixp2000_eth", nds); | 246 | IRQF_SHARED, "ixp2000_eth", nds); |
238 | if (err) { | 247 | if (err) { |
239 | nds_open--; | 248 | nds_open--; |
249 | napi_disable(&ip->napi); | ||
240 | return err; | 250 | return err; |
241 | } | 251 | } |
242 | 252 | ||
@@ -254,6 +264,7 @@ static int ixpdev_close(struct net_device *dev) | |||
254 | struct ixpdev_priv *ip = netdev_priv(dev); | 264 | struct ixpdev_priv *ip = netdev_priv(dev); |
255 | 265 | ||
256 | netif_stop_queue(dev); | 266 | netif_stop_queue(dev); |
267 | napi_disable(&ip->napi); | ||
257 | set_port_admin_status(ip->channel, 0); | 268 | set_port_admin_status(ip->channel, 0); |
258 | 269 | ||
259 | if (!--nds_open) { | 270 | if (!--nds_open) { |
@@ -274,7 +285,6 @@ struct net_device *ixpdev_alloc(int channel, int sizeof_priv) | |||
274 | return NULL; | 285 | return NULL; |
275 | 286 | ||
276 | dev->hard_start_xmit = ixpdev_xmit; | 287 | dev->hard_start_xmit = ixpdev_xmit; |
277 | dev->poll = ixpdev_poll; | ||
278 | dev->open = ixpdev_open; | 288 | dev->open = ixpdev_open; |
279 | dev->stop = ixpdev_close; | 289 | dev->stop = ixpdev_close; |
280 | #ifdef CONFIG_NET_POLL_CONTROLLER | 290 | #ifdef CONFIG_NET_POLL_CONTROLLER |
@@ -282,9 +292,10 @@ struct net_device *ixpdev_alloc(int channel, int sizeof_priv) | |||
282 | #endif | 292 | #endif |
283 | 293 | ||
284 | dev->features |= NETIF_F_SG | NETIF_F_HW_CSUM; | 294 | dev->features |= NETIF_F_SG | NETIF_F_HW_CSUM; |
285 | dev->weight = 64; | ||
286 | 295 | ||
287 | ip = netdev_priv(dev); | 296 | ip = netdev_priv(dev); |
297 | ip->dev = dev; | ||
298 | netif_napi_add(dev, &ip->napi, ixpdev_poll, 64); | ||
288 | ip->channel = channel; | 299 | ip->channel = channel; |
289 | ip->tx_queue_entries = 0; | 300 | ip->tx_queue_entries = 0; |
290 | 301 | ||
diff --git a/drivers/net/ixp2000/ixpdev.h b/drivers/net/ixp2000/ixpdev.h index bd686cb63058..391ece623243 100644 --- a/drivers/net/ixp2000/ixpdev.h +++ b/drivers/net/ixp2000/ixpdev.h | |||
@@ -14,6 +14,8 @@ | |||
14 | 14 | ||
15 | struct ixpdev_priv | 15 | struct ixpdev_priv |
16 | { | 16 | { |
17 | struct net_device *dev; | ||
18 | struct napi_struct napi; | ||
17 | int channel; | 19 | int channel; |
18 | int tx_queue_entries; | 20 | int tx_queue_entries; |
19 | }; | 21 | }; |
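Since poll() no longer receives a net_device, drivers that need one inside the handler grow a back-pointer in their private struct, as ixpdev does with ip->dev above. The interrupt side keeps the familiar two-step schedule, now aimed at the napi_struct. A sketch with hypothetical foo_* names:

#include <linux/netdevice.h>
#include <linux/interrupt.h>

struct foo_priv {
	struct net_device *dev;		/* back-pointer for poll() */
	struct napi_struct napi;
};

extern void foo_disable_rx_irq(struct foo_priv *fp);

static irqreturn_t foo_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct foo_priv *fp = netdev_priv(dev);

	foo_disable_rx_irq(fp);			/* mask RX before polling */
	if (likely(napi_schedule_prep(&fp->napi)))
		__netif_rx_schedule(dev, &fp->napi);
	else
		printk(KERN_CRIT "foo: irq while polling\n");

	return IRQ_HANDLED;
}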
diff --git a/drivers/net/macb.c b/drivers/net/macb.c index a4bb0264180a..74c3f7a7ae4a 100644 --- a/drivers/net/macb.c +++ b/drivers/net/macb.c | |||
@@ -470,47 +470,41 @@ static int macb_rx(struct macb *bp, int budget) | |||
470 | return received; | 470 | return received; |
471 | } | 471 | } |
472 | 472 | ||
473 | static int macb_poll(struct net_device *dev, int *budget) | 473 | static int macb_poll(struct napi_struct *napi, int budget) |
474 | { | 474 | { |
475 | struct macb *bp = netdev_priv(dev); | 475 | struct macb *bp = container_of(napi, struct macb, napi); |
476 | int orig_budget, work_done, retval = 0; | 476 | struct net_device *dev = bp->dev; |
477 | int work_done; | ||
477 | u32 status; | 478 | u32 status; |
478 | 479 | ||
479 | status = macb_readl(bp, RSR); | 480 | status = macb_readl(bp, RSR); |
480 | macb_writel(bp, RSR, status); | 481 | macb_writel(bp, RSR, status); |
481 | 482 | ||
483 | work_done = 0; | ||
482 | if (!status) { | 484 | if (!status) { |
483 | /* | 485 | /* |
484 | * This may happen if an interrupt was pending before | 486 | * This may happen if an interrupt was pending before |
485 | * this function was called last time, and no packets | 487 | * this function was called last time, and no packets |
486 | * have been received since. | 488 | * have been received since. |
487 | */ | 489 | */ |
488 | netif_rx_complete(dev); | 490 | netif_rx_complete(dev, napi); |
489 | goto out; | 491 | goto out; |
490 | } | 492 | } |
491 | 493 | ||
492 | dev_dbg(&bp->pdev->dev, "poll: status = %08lx, budget = %d\n", | 494 | dev_dbg(&bp->pdev->dev, "poll: status = %08lx, budget = %d\n", |
493 | (unsigned long)status, *budget); | 495 | (unsigned long)status, budget); |
494 | 496 | ||
495 | if (!(status & MACB_BIT(REC))) { | 497 | if (!(status & MACB_BIT(REC))) { |
496 | dev_warn(&bp->pdev->dev, | 498 | dev_warn(&bp->pdev->dev, |
497 | "No RX buffers complete, status = %02lx\n", | 499 | "No RX buffers complete, status = %02lx\n", |
498 | (unsigned long)status); | 500 | (unsigned long)status); |
499 | netif_rx_complete(dev); | 501 | netif_rx_complete(dev, napi); |
500 | goto out; | 502 | goto out; |
501 | } | 503 | } |
502 | 504 | ||
503 | orig_budget = *budget; | 505 | work_done = macb_rx(bp, budget); |
504 | if (orig_budget > dev->quota) | 506 | if (work_done < budget) |
505 | orig_budget = dev->quota; | 507 | netif_rx_complete(dev, napi); |
506 | |||
507 | work_done = macb_rx(bp, orig_budget); | ||
508 | if (work_done < orig_budget) { | ||
509 | netif_rx_complete(dev); | ||
510 | retval = 0; | ||
511 | } else { | ||
512 | retval = 1; | ||
513 | } | ||
514 | 508 | ||
515 | /* | 509 | /* |
516 | * We've done what we can to clean the buffers. Make sure we | 510 | * We've done what we can to clean the buffers. Make sure we |
@@ -521,7 +515,7 @@ out: | |||
521 | 515 | ||
522 | /* TODO: Handle errors */ | 516 | /* TODO: Handle errors */ |
523 | 517 | ||
524 | return retval; | 518 | return work_done; |
525 | } | 519 | } |
526 | 520 | ||
527 | static irqreturn_t macb_interrupt(int irq, void *dev_id) | 521 | static irqreturn_t macb_interrupt(int irq, void *dev_id) |
@@ -545,7 +539,7 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id) | |||
545 | } | 539 | } |
546 | 540 | ||
547 | if (status & MACB_RX_INT_FLAGS) { | 541 | if (status & MACB_RX_INT_FLAGS) { |
548 | if (netif_rx_schedule_prep(dev)) { | 542 | if (netif_rx_schedule_prep(dev, &bp->napi)) { |
549 | /* | 543 | /* |
550 | * There's no point taking any more interrupts | 544 | * There's no point taking any more interrupts |
551 | * until we have processed the buffers | 545 | * until we have processed the buffers |
@@ -553,7 +547,7 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id) | |||
553 | macb_writel(bp, IDR, MACB_RX_INT_FLAGS); | 547 | macb_writel(bp, IDR, MACB_RX_INT_FLAGS); |
554 | dev_dbg(&bp->pdev->dev, | 548 | dev_dbg(&bp->pdev->dev, |
555 | "scheduling RX softirq\n"); | 549 | "scheduling RX softirq\n"); |
556 | __netif_rx_schedule(dev); | 550 | __netif_rx_schedule(dev, &bp->napi); |
557 | } | 551 | } |
558 | } | 552 | } |
559 | 553 | ||
@@ -937,6 +931,8 @@ static int macb_open(struct net_device *dev) | |||
937 | return err; | 931 | return err; |
938 | } | 932 | } |
939 | 933 | ||
934 | napi_enable(&bp->napi); | ||
935 | |||
940 | macb_init_rings(bp); | 936 | macb_init_rings(bp); |
941 | macb_init_hw(bp); | 937 | macb_init_hw(bp); |
942 | 938 | ||
@@ -954,6 +950,7 @@ static int macb_close(struct net_device *dev) | |||
954 | unsigned long flags; | 950 | unsigned long flags; |
955 | 951 | ||
956 | netif_stop_queue(dev); | 952 | netif_stop_queue(dev); |
953 | napi_disable(&bp->napi); | ||
957 | 954 | ||
958 | if (bp->phy_dev) | 955 | if (bp->phy_dev) |
959 | phy_stop(bp->phy_dev); | 956 | phy_stop(bp->phy_dev); |
@@ -1146,8 +1143,7 @@ static int __devinit macb_probe(struct platform_device *pdev) | |||
1146 | dev->get_stats = macb_get_stats; | 1143 | dev->get_stats = macb_get_stats; |
1147 | dev->set_multicast_list = macb_set_rx_mode; | 1144 | dev->set_multicast_list = macb_set_rx_mode; |
1148 | dev->do_ioctl = macb_ioctl; | 1145 | dev->do_ioctl = macb_ioctl; |
1149 | dev->poll = macb_poll; | 1146 | netif_napi_add(dev, &bp->napi, macb_poll, 64); |
1150 | dev->weight = 64; | ||
1151 | dev->ethtool_ops = &macb_ethtool_ops; | 1147 | dev->ethtool_ops = &macb_ethtool_ops; |
1152 | 1148 | ||
1153 | dev->base_addr = regs->start; | 1149 | dev->base_addr = regs->start; |
diff --git a/drivers/net/macb.h b/drivers/net/macb.h index 4e3283ebd97c..57b85acf0d16 100644 --- a/drivers/net/macb.h +++ b/drivers/net/macb.h | |||
@@ -374,6 +374,7 @@ struct macb { | |||
374 | struct clk *pclk; | 374 | struct clk *pclk; |
375 | struct clk *hclk; | 375 | struct clk *hclk; |
376 | struct net_device *dev; | 376 | struct net_device *dev; |
377 | struct napi_struct napi; | ||
377 | struct net_device_stats stats; | 378 | struct net_device_stats stats; |
378 | struct macb_stats hw_stats; | 379 | struct macb_stats hw_stats; |
379 | 380 | ||
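macb adds the detail of completing with zero work: a stale interrupt can deliver a poll with nothing in the ring, and the handler now completes NAPI immediately and returns 0 instead of juggling a separate retval. Condensed sketch, hypothetical names:

#include <linux/netdevice.h>

struct foo_priv {
	struct net_device *dev;
	struct napi_struct napi;
};

extern u32 foo_read_rx_status(struct foo_priv *fp);
extern int foo_rx(struct foo_priv *fp, int budget);

static int foo_poll(struct napi_struct *napi, int budget)
{
	struct foo_priv *fp = container_of(napi, struct foo_priv, napi);
	u32 status = foo_read_rx_status(fp);	/* placeholder register read */
	int work_done;

	if (!status) {
		/* interrupt raced with an earlier poll; nothing to do */
		netif_rx_complete(fp->dev, napi);
		return 0;
	}

	work_done = foo_rx(fp, budget);
	if (work_done < budget)
		netif_rx_complete(fp->dev, napi);

	return work_done;
}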
diff --git a/drivers/net/mv643xx_eth.c b/drivers/net/mv643xx_eth.c index 315335671f0f..702eba549161 100644 --- a/drivers/net/mv643xx_eth.c +++ b/drivers/net/mv643xx_eth.c | |||
@@ -66,7 +66,7 @@ static int mv643xx_eth_change_mtu(struct net_device *, int); | |||
66 | static struct net_device_stats *mv643xx_eth_get_stats(struct net_device *); | 66 | static struct net_device_stats *mv643xx_eth_get_stats(struct net_device *); |
67 | static void eth_port_init_mac_tables(unsigned int eth_port_num); | 67 | static void eth_port_init_mac_tables(unsigned int eth_port_num); |
68 | #ifdef MV643XX_NAPI | 68 | #ifdef MV643XX_NAPI |
69 | static int mv643xx_poll(struct net_device *dev, int *budget); | 69 | static int mv643xx_poll(struct napi_struct *napi, int budget); |
70 | #endif | 70 | #endif |
71 | static int ethernet_phy_get(unsigned int eth_port_num); | 71 | static int ethernet_phy_get(unsigned int eth_port_num); |
72 | static void ethernet_phy_set(unsigned int eth_port_num, int phy_addr); | 72 | static void ethernet_phy_set(unsigned int eth_port_num, int phy_addr); |
@@ -562,7 +562,7 @@ static irqreturn_t mv643xx_eth_int_handler(int irq, void *dev_id) | |||
562 | /* wait for previous write to complete */ | 562 | /* wait for previous write to complete */ |
563 | mv_read(MV643XX_ETH_INTERRUPT_MASK_REG(port_num)); | 563 | mv_read(MV643XX_ETH_INTERRUPT_MASK_REG(port_num)); |
564 | 564 | ||
565 | netif_rx_schedule(dev); | 565 | netif_rx_schedule(dev, &mp->napi); |
566 | } | 566 | } |
567 | #else | 567 | #else |
568 | if (eth_int_cause & ETH_INT_CAUSE_RX) | 568 | if (eth_int_cause & ETH_INT_CAUSE_RX) |
@@ -880,6 +880,10 @@ static int mv643xx_eth_open(struct net_device *dev) | |||
880 | 880 | ||
881 | mv643xx_eth_rx_refill_descs(dev); /* Fill RX ring with skb's */ | 881 | mv643xx_eth_rx_refill_descs(dev); /* Fill RX ring with skb's */ |
882 | 882 | ||
883 | #ifdef MV643XX_NAPI | ||
884 | napi_enable(&mp->napi); | ||
885 | #endif | ||
886 | |||
883 | eth_port_start(dev); | 887 | eth_port_start(dev); |
884 | 888 | ||
885 | /* Interrupt Coalescing */ | 889 | /* Interrupt Coalescing */ |
@@ -982,7 +986,7 @@ static int mv643xx_eth_stop(struct net_device *dev) | |||
982 | mv_read(MV643XX_ETH_INTERRUPT_MASK_REG(port_num)); | 986 | mv_read(MV643XX_ETH_INTERRUPT_MASK_REG(port_num)); |
983 | 987 | ||
984 | #ifdef MV643XX_NAPI | 988 | #ifdef MV643XX_NAPI |
985 | netif_poll_disable(dev); | 989 | napi_disable(&mp->napi); |
986 | #endif | 990 | #endif |
987 | netif_carrier_off(dev); | 991 | netif_carrier_off(dev); |
988 | netif_stop_queue(dev); | 992 | netif_stop_queue(dev); |
@@ -992,10 +996,6 @@ static int mv643xx_eth_stop(struct net_device *dev) | |||
992 | mv643xx_eth_free_tx_rings(dev); | 996 | mv643xx_eth_free_tx_rings(dev); |
993 | mv643xx_eth_free_rx_rings(dev); | 997 | mv643xx_eth_free_rx_rings(dev); |
994 | 998 | ||
995 | #ifdef MV643XX_NAPI | ||
996 | netif_poll_enable(dev); | ||
997 | #endif | ||
998 | |||
999 | free_irq(dev->irq, dev); | 999 | free_irq(dev->irq, dev); |
1000 | 1000 | ||
1001 | return 0; | 1001 | return 0; |
@@ -1007,11 +1007,12 @@ static int mv643xx_eth_stop(struct net_device *dev) | |||
1007 | * | 1007 | * |
1008 | * This function is used in case of NAPI | 1008 | * This function is used in case of NAPI |
1009 | */ | 1009 | */ |
1010 | static int mv643xx_poll(struct net_device *dev, int *budget) | 1010 | static int mv643xx_poll(struct napi_struct *napi, int budget) |
1011 | { | 1011 | { |
1012 | struct mv643xx_private *mp = netdev_priv(dev); | 1012 | struct mv643xx_private *mp = container_of(napi, struct mv643xx_private, napi); |
1013 | int done = 1, orig_budget, work_done; | 1013 | struct net_device *dev = mp->dev; |
1014 | unsigned int port_num = mp->port_num; | 1014 | unsigned int port_num = mp->port_num; |
1015 | int work_done; | ||
1015 | 1016 | ||
1016 | #ifdef MV643XX_TX_FAST_REFILL | 1017 | #ifdef MV643XX_TX_FAST_REFILL |
1017 | if (++mp->tx_clean_threshold > 5) { | 1018 | if (++mp->tx_clean_threshold > 5) { |
@@ -1020,27 +1021,20 @@ static int mv643xx_poll(struct net_device *dev, int *budget) | |||
1020 | } | 1021 | } |
1021 | #endif | 1022 | #endif |
1022 | 1023 | ||
1024 | work_done = 0; | ||
1023 | if ((mv_read(MV643XX_ETH_RX_CURRENT_QUEUE_DESC_PTR_0(port_num))) | 1025 | if ((mv_read(MV643XX_ETH_RX_CURRENT_QUEUE_DESC_PTR_0(port_num))) |
1024 | != (u32) mp->rx_used_desc_q) { | 1026 | != (u32) mp->rx_used_desc_q) |
1025 | orig_budget = *budget; | 1027 | work_done = mv643xx_eth_receive_queue(dev, budget); |
1026 | if (orig_budget > dev->quota) | ||
1027 | orig_budget = dev->quota; | ||
1028 | work_done = mv643xx_eth_receive_queue(dev, orig_budget); | ||
1029 | *budget -= work_done; | ||
1030 | dev->quota -= work_done; | ||
1031 | if (work_done >= orig_budget) | ||
1032 | done = 0; | ||
1033 | } | ||
1034 | 1028 | ||
1035 | if (done) { | 1029 | if (work_done < budget) { |
1036 | netif_rx_complete(dev); | 1030 | netif_rx_complete(dev, napi); |
1037 | mv_write(MV643XX_ETH_INTERRUPT_CAUSE_REG(port_num), 0); | 1031 | mv_write(MV643XX_ETH_INTERRUPT_CAUSE_REG(port_num), 0); |
1038 | mv_write(MV643XX_ETH_INTERRUPT_CAUSE_EXTEND_REG(port_num), 0); | 1032 | mv_write(MV643XX_ETH_INTERRUPT_CAUSE_EXTEND_REG(port_num), 0); |
1039 | mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(port_num), | 1033 | mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(port_num), |
1040 | ETH_INT_UNMASK_ALL); | 1034 | ETH_INT_UNMASK_ALL); |
1041 | } | 1035 | } |
1042 | 1036 | ||
1043 | return done ? 0 : 1; | 1037 | return work_done; |
1044 | } | 1038 | } |
1045 | #endif | 1039 | #endif |
1046 | 1040 | ||
@@ -1333,6 +1327,10 @@ static int mv643xx_eth_probe(struct platform_device *pdev) | |||
1333 | platform_set_drvdata(pdev, dev); | 1327 | platform_set_drvdata(pdev, dev); |
1334 | 1328 | ||
1335 | mp = netdev_priv(dev); | 1329 | mp = netdev_priv(dev); |
1330 | mp->dev = dev; | ||
1331 | #ifdef MV643XX_NAPI | ||
1332 | netif_napi_add(dev, &mp->napi, mv643xx_poll, 64); | ||
1333 | #endif | ||
1336 | 1334 | ||
1337 | res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); | 1335 | res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); |
1338 | BUG_ON(!res); | 1336 | BUG_ON(!res); |
@@ -1347,10 +1345,6 @@ static int mv643xx_eth_probe(struct platform_device *pdev) | |||
1347 | 1345 | ||
1348 | /* No need to Tx Timeout */ | 1346 | /* No need to Tx Timeout */ |
1349 | dev->tx_timeout = mv643xx_eth_tx_timeout; | 1347 | dev->tx_timeout = mv643xx_eth_tx_timeout; |
1350 | #ifdef MV643XX_NAPI | ||
1351 | dev->poll = mv643xx_poll; | ||
1352 | dev->weight = 64; | ||
1353 | #endif | ||
1354 | 1348 | ||
1355 | #ifdef CONFIG_NET_POLL_CONTROLLER | 1349 | #ifdef CONFIG_NET_POLL_CONTROLLER |
1356 | dev->poll_controller = mv643xx_netpoll; | 1350 | dev->poll_controller = mv643xx_netpoll; |
diff --git a/drivers/net/mv643xx_eth.h b/drivers/net/mv643xx_eth.h index 565b96696aca..be669eb23788 100644 --- a/drivers/net/mv643xx_eth.h +++ b/drivers/net/mv643xx_eth.h | |||
@@ -320,6 +320,8 @@ struct mv643xx_private { | |||
320 | 320 | ||
321 | struct work_struct tx_timeout_task; | 321 | struct work_struct tx_timeout_task; |
322 | 322 | ||
323 | struct net_device *dev; | ||
324 | struct napi_struct napi; | ||
323 | struct net_device_stats stats; | 325 | struct net_device_stats stats; |
324 | struct mv643xx_mib_counters mib_counters; | 326 | struct mv643xx_mib_counters mib_counters; |
325 | spinlock_t lock; | 327 | spinlock_t lock; |
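mv643xx also retires netif_poll_enable()/netif_poll_disable(), which acted on the net_device, in favour of napi_enable()/napi_disable() on the embedded napi_struct; note the enable now happens once in open() rather than being re-done at the tail of stop(). Registration stays behind the driver's NAPI config option. A probe-time sketch under a hypothetical CONFIG_FOO_NAPI:

#include <linux/etherdevice.h>
#include <linux/platform_device.h>

struct foo_priv {
	struct net_device *dev;
	struct napi_struct napi;
};

extern int foo_poll(struct napi_struct *napi, int budget);

static int foo_probe(struct platform_device *pdev)
{
	struct net_device *dev;
	struct foo_priv *fp;

	dev = alloc_etherdev(sizeof(struct foo_priv));
	if (!dev)
		return -ENOMEM;

	fp = netdev_priv(dev);
	fp->dev = dev;				/* back-pointer for poll() */
#ifdef CONFIG_FOO_NAPI
	netif_napi_add(dev, &fp->napi, foo_poll, 64);	/* 64 was dev->weight */
#endif
	/* ... map registers, set dev->hard_start_xmit etc., register_netdev() ... */
	return 0;
}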
diff --git a/drivers/net/myri10ge/myri10ge.c b/drivers/net/myri10ge/myri10ge.c index 556962f9612d..a30146ea51f0 100644 --- a/drivers/net/myri10ge/myri10ge.c +++ b/drivers/net/myri10ge/myri10ge.c | |||
@@ -163,6 +163,7 @@ struct myri10ge_priv { | |||
163 | int small_bytes; | 163 | int small_bytes; |
164 | int big_bytes; | 164 | int big_bytes; |
165 | struct net_device *dev; | 165 | struct net_device *dev; |
166 | struct napi_struct napi; | ||
166 | struct net_device_stats stats; | 167 | struct net_device_stats stats; |
167 | u8 __iomem *sram; | 168 | u8 __iomem *sram; |
168 | int sram_size; | 169 | int sram_size; |
@@ -1100,7 +1101,7 @@ static inline void myri10ge_tx_done(struct myri10ge_priv *mgp, int mcp_index) | |||
1100 | } | 1101 | } |
1101 | } | 1102 | } |
1102 | 1103 | ||
1103 | static inline void myri10ge_clean_rx_done(struct myri10ge_priv *mgp, int *limit) | 1104 | static inline int myri10ge_clean_rx_done(struct myri10ge_priv *mgp, int budget) |
1104 | { | 1105 | { |
1105 | struct myri10ge_rx_done *rx_done = &mgp->rx_done; | 1106 | struct myri10ge_rx_done *rx_done = &mgp->rx_done; |
1106 | unsigned long rx_bytes = 0; | 1107 | unsigned long rx_bytes = 0; |
@@ -1109,10 +1110,11 @@ static inline void myri10ge_clean_rx_done(struct myri10ge_priv *mgp, int *limit) | |||
1109 | 1110 | ||
1110 | int idx = rx_done->idx; | 1111 | int idx = rx_done->idx; |
1111 | int cnt = rx_done->cnt; | 1112 | int cnt = rx_done->cnt; |
1113 | int work_done = 0; | ||
1112 | u16 length; | 1114 | u16 length; |
1113 | __wsum checksum; | 1115 | __wsum checksum; |
1114 | 1116 | ||
1115 | while (rx_done->entry[idx].length != 0 && *limit != 0) { | 1117 | while (rx_done->entry[idx].length != 0 && work_done++ < budget) { |
1116 | length = ntohs(rx_done->entry[idx].length); | 1118 | length = ntohs(rx_done->entry[idx].length); |
1117 | rx_done->entry[idx].length = 0; | 1119 | rx_done->entry[idx].length = 0; |
1118 | checksum = csum_unfold(rx_done->entry[idx].checksum); | 1120 | checksum = csum_unfold(rx_done->entry[idx].checksum); |
@@ -1128,10 +1130,6 @@ static inline void myri10ge_clean_rx_done(struct myri10ge_priv *mgp, int *limit) | |||
1128 | rx_bytes += rx_ok * (unsigned long)length; | 1130 | rx_bytes += rx_ok * (unsigned long)length; |
1129 | cnt++; | 1131 | cnt++; |
1130 | idx = cnt & (myri10ge_max_intr_slots - 1); | 1132 | idx = cnt & (myri10ge_max_intr_slots - 1); |
1131 | |||
1132 | /* limit potential for livelock by only handling a | ||
1133 | * limited number of frames. */ | ||
1134 | (*limit)--; | ||
1135 | } | 1133 | } |
1136 | rx_done->idx = idx; | 1134 | rx_done->idx = idx; |
1137 | rx_done->cnt = cnt; | 1135 | rx_done->cnt = cnt; |
@@ -1145,6 +1143,7 @@ static inline void myri10ge_clean_rx_done(struct myri10ge_priv *mgp, int *limit) | |||
1145 | if (mgp->rx_big.fill_cnt - mgp->rx_big.cnt < myri10ge_fill_thresh) | 1143 | if (mgp->rx_big.fill_cnt - mgp->rx_big.cnt < myri10ge_fill_thresh) |
1146 | myri10ge_alloc_rx_pages(mgp, &mgp->rx_big, mgp->big_bytes, 0); | 1144 | myri10ge_alloc_rx_pages(mgp, &mgp->rx_big, mgp->big_bytes, 0); |
1147 | 1145 | ||
1146 | return work_done; | ||
1148 | } | 1147 | } |
1149 | 1148 | ||
1150 | static inline void myri10ge_check_statblock(struct myri10ge_priv *mgp) | 1149 | static inline void myri10ge_check_statblock(struct myri10ge_priv *mgp) |
@@ -1189,26 +1188,21 @@ static inline void myri10ge_check_statblock(struct myri10ge_priv *mgp) | |||
1189 | } | 1188 | } |
1190 | } | 1189 | } |
1191 | 1190 | ||
1192 | static int myri10ge_poll(struct net_device *netdev, int *budget) | 1191 | static int myri10ge_poll(struct napi_struct *napi, int budget) |
1193 | { | 1192 | { |
1194 | struct myri10ge_priv *mgp = netdev_priv(netdev); | 1193 | struct myri10ge_priv *mgp = container_of(napi, struct myri10ge_priv, napi); |
1194 | struct net_device *netdev = mgp->dev; | ||
1195 | struct myri10ge_rx_done *rx_done = &mgp->rx_done; | 1195 | struct myri10ge_rx_done *rx_done = &mgp->rx_done; |
1196 | int limit, orig_limit, work_done; | 1196 | int work_done; |
1197 | 1197 | ||
1198 | /* process as many rx events as NAPI will allow */ | 1198 | /* process as many rx events as NAPI will allow */ |
1199 | limit = min(*budget, netdev->quota); | 1199 | work_done = myri10ge_clean_rx_done(mgp, budget); |
1200 | orig_limit = limit; | ||
1201 | myri10ge_clean_rx_done(mgp, &limit); | ||
1202 | work_done = orig_limit - limit; | ||
1203 | *budget -= work_done; | ||
1204 | netdev->quota -= work_done; | ||
1205 | 1200 | ||
1206 | if (rx_done->entry[rx_done->idx].length == 0 || !netif_running(netdev)) { | 1201 | if (rx_done->entry[rx_done->idx].length == 0 || !netif_running(netdev)) { |
1207 | netif_rx_complete(netdev); | 1202 | netif_rx_complete(netdev, napi); |
1208 | put_be32(htonl(3), mgp->irq_claim); | 1203 | put_be32(htonl(3), mgp->irq_claim); |
1209 | return 0; | ||
1210 | } | 1204 | } |
1211 | return 1; | 1205 | return work_done; |
1212 | } | 1206 | } |
1213 | 1207 | ||
1214 | static irqreturn_t myri10ge_intr(int irq, void *arg) | 1208 | static irqreturn_t myri10ge_intr(int irq, void *arg) |
@@ -1226,7 +1220,7 @@ static irqreturn_t myri10ge_intr(int irq, void *arg) | |||
1226 | /* low bit indicates receives are present, so schedule | 1220 | /* low bit indicates receives are present, so schedule |
1227 | * napi poll handler */ | 1221 | * napi poll handler */ |
1228 | if (stats->valid & 1) | 1222 | if (stats->valid & 1) |
1229 | netif_rx_schedule(mgp->dev); | 1223 | netif_rx_schedule(mgp->dev, &mgp->napi); |
1230 | 1224 | ||
1231 | if (!mgp->msi_enabled) { | 1225 | if (!mgp->msi_enabled) { |
1232 | put_be32(0, mgp->irq_deassert); | 1226 | put_be32(0, mgp->irq_deassert); |
@@ -1853,7 +1847,7 @@ static int myri10ge_open(struct net_device *dev) | |||
1853 | mgp->link_state = htonl(~0U); | 1847 | mgp->link_state = htonl(~0U); |
1854 | mgp->rdma_tags_available = 15; | 1848 | mgp->rdma_tags_available = 15; |
1855 | 1849 | ||
1856 | netif_poll_enable(mgp->dev); /* must happen prior to any irq */ | 1850 | napi_enable(&mgp->napi); /* must happen prior to any irq */ |
1857 | 1851 | ||
1858 | status = myri10ge_send_cmd(mgp, MXGEFW_CMD_ETHERNET_UP, &cmd, 0); | 1852 | status = myri10ge_send_cmd(mgp, MXGEFW_CMD_ETHERNET_UP, &cmd, 0); |
1859 | if (status) { | 1853 | if (status) { |
@@ -1897,7 +1891,7 @@ static int myri10ge_close(struct net_device *dev) | |||
1897 | 1891 | ||
1898 | del_timer_sync(&mgp->watchdog_timer); | 1892 | del_timer_sync(&mgp->watchdog_timer); |
1899 | mgp->running = MYRI10GE_ETH_STOPPING; | 1893 | mgp->running = MYRI10GE_ETH_STOPPING; |
1900 | netif_poll_disable(mgp->dev); | 1894 | napi_disable(&mgp->napi); |
1901 | netif_carrier_off(dev); | 1895 | netif_carrier_off(dev); |
1902 | netif_stop_queue(dev); | 1896 | netif_stop_queue(dev); |
1903 | old_down_cnt = mgp->down_cnt; | 1897 | old_down_cnt = mgp->down_cnt; |
@@ -2857,6 +2851,8 @@ static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
2857 | mgp = netdev_priv(netdev); | 2851 | mgp = netdev_priv(netdev); |
2858 | memset(mgp, 0, sizeof(*mgp)); | 2852 | memset(mgp, 0, sizeof(*mgp)); |
2859 | mgp->dev = netdev; | 2853 | mgp->dev = netdev; |
2854 | netif_napi_add(netdev, &mgp->napi, | ||
2855 | myri10ge_poll, myri10ge_napi_weight); | ||
2860 | mgp->pdev = pdev; | 2856 | mgp->pdev = pdev; |
2861 | mgp->csum_flag = MXGEFW_FLAGS_CKSUM; | 2857 | mgp->csum_flag = MXGEFW_FLAGS_CKSUM; |
2862 | mgp->pause = myri10ge_flow_control; | 2858 | mgp->pause = myri10ge_flow_control; |
@@ -2981,8 +2977,6 @@ static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
2981 | netdev->features = NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_TSO; | 2977 | netdev->features = NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_TSO; |
2982 | if (dac_enabled) | 2978 | if (dac_enabled) |
2983 | netdev->features |= NETIF_F_HIGHDMA; | 2979 | netdev->features |= NETIF_F_HIGHDMA; |
2984 | netdev->poll = myri10ge_poll; | ||
2985 | netdev->weight = myri10ge_napi_weight; | ||
2986 | 2980 | ||
2987 | /* make sure we can get an irq, and that MSI can be | 2981 | /* make sure we can get an irq, and that MSI can be |
2988 | * setup (if available). Also ensure netdev->irq | 2982 | * setup (if available). Also ensure netdev->irq |
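myri10ge shows the same change one level down: the in/out *limit that capped work per poll becomes a plain budget argument, and the cleaner reports how much it did so poll() can pass the count straight through. The general shape, hypothetical names:

#include <linux/netdevice.h>

struct foo_priv;

extern int foo_rx_desc_ready(struct foo_priv *fp);
extern void foo_rx_one(struct foo_priv *fp);

static int foo_clean_rx(struct foo_priv *fp, int budget)
{
	int work_done = 0;

	/* stop at the budget even if descriptors remain; the core will
	 * poll again because the caller stayed on the poll list */
	while (work_done < budget && foo_rx_desc_ready(fp)) {
		foo_rx_one(fp);			/* placeholder per-packet handler */
		work_done++;
	}
	return work_done;
}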
diff --git a/drivers/net/natsemi.c b/drivers/net/natsemi.c index b47a12d684f9..43cfa4b3e294 100644 --- a/drivers/net/natsemi.c +++ b/drivers/net/natsemi.c | |||
@@ -560,6 +560,8 @@ struct netdev_private { | |||
560 | /* address of a sent-in-place packet/buffer, for later free() */ | 560 | /* address of a sent-in-place packet/buffer, for later free() */ |
561 | struct sk_buff *tx_skbuff[TX_RING_SIZE]; | 561 | struct sk_buff *tx_skbuff[TX_RING_SIZE]; |
562 | dma_addr_t tx_dma[TX_RING_SIZE]; | 562 | dma_addr_t tx_dma[TX_RING_SIZE]; |
563 | struct net_device *dev; | ||
564 | struct napi_struct napi; | ||
563 | struct net_device_stats stats; | 565 | struct net_device_stats stats; |
564 | /* Media monitoring timer */ | 566 | /* Media monitoring timer */ |
565 | struct timer_list timer; | 567 | struct timer_list timer; |
@@ -636,7 +638,7 @@ static void init_registers(struct net_device *dev); | |||
636 | static int start_tx(struct sk_buff *skb, struct net_device *dev); | 638 | static int start_tx(struct sk_buff *skb, struct net_device *dev); |
637 | static irqreturn_t intr_handler(int irq, void *dev_instance); | 639 | static irqreturn_t intr_handler(int irq, void *dev_instance); |
638 | static void netdev_error(struct net_device *dev, int intr_status); | 640 | static void netdev_error(struct net_device *dev, int intr_status); |
639 | static int natsemi_poll(struct net_device *dev, int *budget); | 641 | static int natsemi_poll(struct napi_struct *napi, int budget); |
640 | static void netdev_rx(struct net_device *dev, int *work_done, int work_to_do); | 642 | static void netdev_rx(struct net_device *dev, int *work_done, int work_to_do); |
641 | static void netdev_tx_done(struct net_device *dev); | 643 | static void netdev_tx_done(struct net_device *dev); |
642 | static int natsemi_change_mtu(struct net_device *dev, int new_mtu); | 644 | static int natsemi_change_mtu(struct net_device *dev, int new_mtu); |
@@ -861,6 +863,7 @@ static int __devinit natsemi_probe1 (struct pci_dev *pdev, | |||
861 | dev->irq = irq; | 863 | dev->irq = irq; |
862 | 864 | ||
863 | np = netdev_priv(dev); | 865 | np = netdev_priv(dev); |
866 | netif_napi_add(dev, &np->napi, natsemi_poll, 64); | ||
864 | 867 | ||
865 | np->pci_dev = pdev; | 868 | np->pci_dev = pdev; |
866 | pci_set_drvdata(pdev, dev); | 869 | pci_set_drvdata(pdev, dev); |
@@ -931,8 +934,6 @@ static int __devinit natsemi_probe1 (struct pci_dev *pdev, | |||
931 | dev->do_ioctl = &netdev_ioctl; | 934 | dev->do_ioctl = &netdev_ioctl; |
932 | dev->tx_timeout = &tx_timeout; | 935 | dev->tx_timeout = &tx_timeout; |
933 | dev->watchdog_timeo = TX_TIMEOUT; | 936 | dev->watchdog_timeo = TX_TIMEOUT; |
934 | dev->poll = natsemi_poll; | ||
935 | dev->weight = 64; | ||
936 | 937 | ||
937 | #ifdef CONFIG_NET_POLL_CONTROLLER | 938 | #ifdef CONFIG_NET_POLL_CONTROLLER |
938 | dev->poll_controller = &natsemi_poll_controller; | 939 | dev->poll_controller = &natsemi_poll_controller; |
@@ -1554,6 +1555,8 @@ static int netdev_open(struct net_device *dev) | |||
1554 | free_irq(dev->irq, dev); | 1555 | free_irq(dev->irq, dev); |
1555 | return i; | 1556 | return i; |
1556 | } | 1557 | } |
1558 | napi_enable(&np->napi); | ||
1559 | |||
1557 | init_ring(dev); | 1560 | init_ring(dev); |
1558 | spin_lock_irq(&np->lock); | 1561 | spin_lock_irq(&np->lock); |
1559 | init_registers(dev); | 1562 | init_registers(dev); |
@@ -2200,10 +2203,10 @@ static irqreturn_t intr_handler(int irq, void *dev_instance) | |||
2200 | 2203 | ||
2201 | prefetch(&np->rx_skbuff[np->cur_rx % RX_RING_SIZE]); | 2204 | prefetch(&np->rx_skbuff[np->cur_rx % RX_RING_SIZE]); |
2202 | 2205 | ||
2203 | if (netif_rx_schedule_prep(dev)) { | 2206 | if (netif_rx_schedule_prep(dev, &np->napi)) { |
2204 | /* Disable interrupts and register for poll */ | 2207 | /* Disable interrupts and register for poll */ |
2205 | natsemi_irq_disable(dev); | 2208 | natsemi_irq_disable(dev); |
2206 | __netif_rx_schedule(dev); | 2209 | __netif_rx_schedule(dev, &np->napi); |
2207 | } else | 2210 | } else |
2208 | printk(KERN_WARNING | 2211 | printk(KERN_WARNING |
2209 | "%s: Ignoring interrupt, status %#08x, mask %#08x.\n", | 2212 | "%s: Ignoring interrupt, status %#08x, mask %#08x.\n", |
@@ -2216,12 +2219,11 @@ static irqreturn_t intr_handler(int irq, void *dev_instance) | |||
2216 | /* This is the NAPI poll routine. As well as the standard RX handling | 2219 | /* This is the NAPI poll routine. As well as the standard RX handling |
2217 | * it also handles all other interrupts that the chip might raise. | 2220 | * it also handles all other interrupts that the chip might raise. |
2218 | */ | 2221 | */ |
2219 | static int natsemi_poll(struct net_device *dev, int *budget) | 2222 | static int natsemi_poll(struct napi_struct *napi, int budget) |
2220 | { | 2223 | { |
2221 | struct netdev_private *np = netdev_priv(dev); | 2224 | struct netdev_private *np = container_of(napi, struct netdev_private, napi); |
2225 | struct net_device *dev = np->dev; | ||
2222 | void __iomem * ioaddr = ns_ioaddr(dev); | 2226 | void __iomem * ioaddr = ns_ioaddr(dev); |
2223 | |||
2224 | int work_to_do = min(*budget, dev->quota); | ||
2225 | int work_done = 0; | 2227 | int work_done = 0; |
2226 | 2228 | ||
2227 | do { | 2229 | do { |
@@ -2236,7 +2238,7 @@ static int natsemi_poll(struct net_device *dev, int *budget) | |||
2236 | if (np->intr_status & | 2238 | if (np->intr_status & |
2237 | (IntrRxDone | IntrRxIntr | RxStatusFIFOOver | | 2239 | (IntrRxDone | IntrRxIntr | RxStatusFIFOOver | |
2238 | IntrRxErr | IntrRxOverrun)) { | 2240 | IntrRxErr | IntrRxOverrun)) { |
2239 | netdev_rx(dev, &work_done, work_to_do); | 2241 | netdev_rx(dev, &work_done, budget); |
2240 | } | 2242 | } |
2241 | 2243 | ||
2242 | if (np->intr_status & | 2244 | if (np->intr_status & |
@@ -2250,16 +2252,13 @@ static int natsemi_poll(struct net_device *dev, int *budget) | |||
2250 | if (np->intr_status & IntrAbnormalSummary) | 2252 | if (np->intr_status & IntrAbnormalSummary) |
2251 | netdev_error(dev, np->intr_status); | 2253 | netdev_error(dev, np->intr_status); |
2252 | 2254 | ||
2253 | *budget -= work_done; | 2255 | if (work_done >= budget) |
2254 | dev->quota -= work_done; | 2256 | return work_done; |
2255 | |||
2256 | if (work_done >= work_to_do) | ||
2257 | return 1; | ||
2258 | 2257 | ||
2259 | np->intr_status = readl(ioaddr + IntrStatus); | 2258 | np->intr_status = readl(ioaddr + IntrStatus); |
2260 | } while (np->intr_status); | 2259 | } while (np->intr_status); |
2261 | 2260 | ||
2262 | netif_rx_complete(dev); | 2261 | netif_rx_complete(dev, napi); |
2263 | 2262 | ||
2264 | /* Reenable interrupts providing nothing is trying to shut | 2263 | /* Reenable interrupts providing nothing is trying to shut |
2265 | * the chip down. */ | 2264 | * the chip down. */ |
@@ -2268,7 +2267,7 @@ static int natsemi_poll(struct net_device *dev, int *budget) | |||
2268 | natsemi_irq_enable(dev); | 2267 | natsemi_irq_enable(dev); |
2269 | spin_unlock(&np->lock); | 2268 | spin_unlock(&np->lock); |
2270 | 2269 | ||
2271 | return 0; | 2270 | return work_done; |
2272 | } | 2271 | } |
2273 | 2272 | ||
2274 | /* This routine is logically part of the interrupt handler, but separated | 2273 | /* This routine is logically part of the interrupt handler, but separated |
@@ -3158,6 +3157,8 @@ static int netdev_close(struct net_device *dev) | |||
3158 | dev->name, np->cur_tx, np->dirty_tx, | 3157 | dev->name, np->cur_tx, np->dirty_tx, |
3159 | np->cur_rx, np->dirty_rx); | 3158 | np->cur_rx, np->dirty_rx); |
3160 | 3159 | ||
3160 | napi_disable(&np->napi); | ||
3161 | |||
3161 | /* | 3162 | /* |
3162 | * FIXME: what if someone tries to close a device | 3163 | * FIXME: what if someone tries to close a device |
3163 | * that is suspended? | 3164 | * that is suspended? |
@@ -3253,7 +3254,7 @@ static void __devexit natsemi_remove1 (struct pci_dev *pdev) | |||
3253 | * disable_irq() to enforce synchronization. | 3254 | * disable_irq() to enforce synchronization. |
3254 | * * natsemi_poll: checks before reenabling interrupts. suspend | 3255 | * * natsemi_poll: checks before reenabling interrupts. suspend |
3255 | * sets hands_off, disables interrupts and then waits with | 3256 | * sets hands_off, disables interrupts and then waits with |
3256 | * netif_poll_disable(). | 3257 | * napi_disable(). |
3257 | * | 3258 | * |
3258 | * Interrupts must be disabled, otherwise hands_off can cause irq storms. | 3259 | * Interrupts must be disabled, otherwise hands_off can cause irq storms. |
3259 | */ | 3260 | */ |
@@ -3279,7 +3280,7 @@ static int natsemi_suspend (struct pci_dev *pdev, pm_message_t state) | |||
3279 | spin_unlock_irq(&np->lock); | 3280 | spin_unlock_irq(&np->lock); |
3280 | enable_irq(dev->irq); | 3281 | enable_irq(dev->irq); |
3281 | 3282 | ||
3282 | netif_poll_disable(dev); | 3283 | napi_disable(&np->napi); |
3283 | 3284 | ||
3284 | /* Update the error counts. */ | 3285 | /* Update the error counts. */ |
3285 | __get_stats(dev); | 3286 | __get_stats(dev); |
@@ -3320,6 +3321,8 @@ static int natsemi_resume (struct pci_dev *pdev) | |||
3320 | pci_enable_device(pdev); | 3321 | pci_enable_device(pdev); |
3321 | /* pci_power_on(pdev); */ | 3322 | /* pci_power_on(pdev); */ |
3322 | 3323 | ||
3324 | napi_enable(&np->napi); | ||
3325 | |||
3323 | natsemi_reset(dev); | 3326 | natsemi_reset(dev); |
3324 | init_ring(dev); | 3327 | init_ring(dev); |
3325 | disable_irq(dev->irq); | 3328 | disable_irq(dev->irq); |
@@ -3333,7 +3336,6 @@ static int natsemi_resume (struct pci_dev *pdev) | |||
3333 | mod_timer(&np->timer, jiffies + 1*HZ); | 3336 | mod_timer(&np->timer, jiffies + 1*HZ); |
3334 | } | 3337 | } |
3335 | netif_device_attach(dev); | 3338 | netif_device_attach(dev); |
3336 | netif_poll_enable(dev); | ||
3337 | out: | 3339 | out: |
3338 | rtnl_unlock(); | 3340 | rtnl_unlock(); |
3339 | return 0; | 3341 | return 0; |
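natsemi's poll services every interrupt cause, so the budget check sits inside the service loop: when the budget is exhausted it returns work_done while still scheduled, and it only completes once the status register drains. Condensed sketch with hypothetical names and cause bits:

#include <linux/netdevice.h>

#define FOO_INTR_RX	0x01	/* hypothetical cause bits */
#define FOO_INTR_TX	0x02

struct foo_priv {
	struct net_device *dev;
	struct napi_struct napi;
};

extern u32 foo_read_intr_status(struct foo_priv *fp);
extern void foo_rx(struct net_device *dev, int *work_done, int budget);
extern void foo_tx_done(struct net_device *dev);
extern void foo_irq_enable(struct foo_priv *fp);

static int foo_poll(struct napi_struct *napi, int budget)
{
	struct foo_priv *fp = container_of(napi, struct foo_priv, napi);
	struct net_device *dev = fp->dev;
	int work_done = 0;
	u32 status;

	status = foo_read_intr_status(fp);
	do {
		if (status & FOO_INTR_RX)
			foo_rx(dev, &work_done, budget);
		if (status & FOO_INTR_TX)
			foo_tx_done(dev);

		if (work_done >= budget)
			return work_done;	/* budget gone: stay scheduled */

		status = foo_read_intr_status(fp);
	} while (status);

	netif_rx_complete(dev, napi);
	foo_irq_enable(fp);
	return work_done;
}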
diff --git a/drivers/net/netxen/netxen_nic.h b/drivers/net/netxen/netxen_nic.h index d4c92cc879d4..aaa34939485b 100644 --- a/drivers/net/netxen/netxen_nic.h +++ b/drivers/net/netxen/netxen_nic.h | |||
@@ -880,6 +880,7 @@ struct netxen_adapter { | |||
880 | struct netxen_adapter *master; | 880 | struct netxen_adapter *master; |
881 | struct net_device *netdev; | 881 | struct net_device *netdev; |
882 | struct pci_dev *pdev; | 882 | struct pci_dev *pdev; |
883 | struct napi_struct napi; | ||
883 | struct net_device_stats net_stats; | 884 | struct net_device_stats net_stats; |
884 | unsigned char mac_addr[ETH_ALEN]; | 885 | unsigned char mac_addr[ETH_ALEN]; |
885 | int mtu; | 886 | int mtu; |
diff --git a/drivers/net/netxen/netxen_nic_main.c b/drivers/net/netxen/netxen_nic_main.c index 3122d0101638..a10bbefbdadd 100644 --- a/drivers/net/netxen/netxen_nic_main.c +++ b/drivers/net/netxen/netxen_nic_main.c | |||
@@ -68,7 +68,7 @@ static void netxen_tx_timeout(struct net_device *netdev); | |||
68 | static void netxen_tx_timeout_task(struct work_struct *work); | 68 | static void netxen_tx_timeout_task(struct work_struct *work); |
69 | static void netxen_watchdog(unsigned long); | 69 | static void netxen_watchdog(unsigned long); |
70 | static int netxen_handle_int(struct netxen_adapter *, struct net_device *); | 70 | static int netxen_handle_int(struct netxen_adapter *, struct net_device *); |
71 | static int netxen_nic_poll(struct net_device *dev, int *budget); | 71 | static int netxen_nic_poll(struct napi_struct *napi, int budget); |
72 | #ifdef CONFIG_NET_POLL_CONTROLLER | 72 | #ifdef CONFIG_NET_POLL_CONTROLLER |
73 | static void netxen_nic_poll_controller(struct net_device *netdev); | 73 | static void netxen_nic_poll_controller(struct net_device *netdev); |
74 | #endif | 74 | #endif |
@@ -402,6 +402,9 @@ netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
402 | adapter->netdev = netdev; | 402 | adapter->netdev = netdev; |
403 | adapter->pdev = pdev; | 403 | adapter->pdev = pdev; |
404 | 404 | ||
405 | netif_napi_add(netdev, &adapter->napi, | ||
406 | netxen_nic_poll, NETXEN_NETDEV_WEIGHT); | ||
407 | |||
405 | /* this will be read from FW later */ | 408 | /* this will be read from FW later */ |
406 | adapter->intr_scheme = -1; | 409 | adapter->intr_scheme = -1; |
407 | 410 | ||
@@ -422,8 +425,6 @@ netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
422 | netxen_nic_change_mtu(netdev, netdev->mtu); | 425 | netxen_nic_change_mtu(netdev, netdev->mtu); |
423 | 426 | ||
424 | SET_ETHTOOL_OPS(netdev, &netxen_nic_ethtool_ops); | 427 | SET_ETHTOOL_OPS(netdev, &netxen_nic_ethtool_ops); |
425 | netdev->poll = netxen_nic_poll; | ||
426 | netdev->weight = NETXEN_NETDEV_WEIGHT; | ||
427 | #ifdef CONFIG_NET_POLL_CONTROLLER | 428 | #ifdef CONFIG_NET_POLL_CONTROLLER |
428 | netdev->poll_controller = netxen_nic_poll_controller; | 429 | netdev->poll_controller = netxen_nic_poll_controller; |
429 | #endif | 430 | #endif |
@@ -885,6 +886,8 @@ static int netxen_nic_open(struct net_device *netdev) | |||
885 | if (!adapter->driver_mismatch) | 886 | if (!adapter->driver_mismatch) |
886 | mod_timer(&adapter->watchdog_timer, jiffies); | 887 | mod_timer(&adapter->watchdog_timer, jiffies); |
887 | 888 | ||
889 | napi_enable(&adapter->napi); | ||
890 | |||
888 | netxen_nic_enable_int(adapter); | 891 | netxen_nic_enable_int(adapter); |
889 | 892 | ||
890 | /* Done here again so that even if phantom sw overwrote it, | 893 | /* Done here again so that even if phantom sw overwrote it, |
@@ -894,6 +897,7 @@ static int netxen_nic_open(struct net_device *netdev) | |||
894 | del_timer_sync(&adapter->watchdog_timer); | 897 | del_timer_sync(&adapter->watchdog_timer); |
895 | printk(KERN_ERR "%s: Failed to initialize port %d\n", | 898 | printk(KERN_ERR "%s: Failed to initialize port %d\n", |
896 | netxen_nic_driver_name, adapter->portnum); | 899 | netxen_nic_driver_name, adapter->portnum); |
900 | napi_disable(&adapter->napi); | ||
897 | return -EIO; | 901 | return -EIO; |
898 | } | 902 | } |
899 | if (adapter->macaddr_set) | 903 | if (adapter->macaddr_set) |
@@ -923,6 +927,7 @@ static int netxen_nic_close(struct net_device *netdev) | |||
923 | 927 | ||
924 | netif_carrier_off(netdev); | 928 | netif_carrier_off(netdev); |
925 | netif_stop_queue(netdev); | 929 | netif_stop_queue(netdev); |
930 | napi_disable(&adapter->napi); | ||
926 | 931 | ||
927 | netxen_nic_disable_int(adapter); | 932 | netxen_nic_disable_int(adapter); |
928 | 933 | ||
@@ -1243,11 +1248,11 @@ netxen_handle_int(struct netxen_adapter *adapter, struct net_device *netdev) | |||
1243 | netxen_nic_disable_int(adapter); | 1248 | netxen_nic_disable_int(adapter); |
1244 | 1249 | ||
1245 | if (netxen_nic_rx_has_work(adapter) || netxen_nic_tx_has_work(adapter)) { | 1250 | if (netxen_nic_rx_has_work(adapter) || netxen_nic_tx_has_work(adapter)) { |
1246 | if (netif_rx_schedule_prep(netdev)) { | 1251 | if (netif_rx_schedule_prep(netdev, &adapter->napi)) { |
1247 | /* | 1252 | /* |
1248 | * Interrupts are already disabled. | 1253 | * Interrupts are already disabled. |
1249 | */ | 1254 | */ |
1250 | __netif_rx_schedule(netdev); | 1255 | __netif_rx_schedule(netdev, &adapter->napi); |
1251 | } else { | 1256 | } else { |
1252 | static unsigned int intcount = 0; | 1257 | static unsigned int intcount = 0; |
1253 | if ((++intcount & 0xfff) == 0xfff) | 1258 | if ((++intcount & 0xfff) == 0xfff) |
@@ -1305,14 +1310,13 @@ irqreturn_t netxen_intr(int irq, void *data) | |||
1305 | return IRQ_HANDLED; | 1310 | return IRQ_HANDLED; |
1306 | } | 1311 | } |
1307 | 1312 | ||
1308 | static int netxen_nic_poll(struct net_device *netdev, int *budget) | 1313 | static int netxen_nic_poll(struct napi_struct *napi, int budget) |
1309 | { | 1314 | { |
1310 | struct netxen_adapter *adapter = netdev_priv(netdev); | 1315 | struct netxen_adapter *adapter = container_of(napi, struct netxen_adapter, napi); |
1311 | int work_to_do = min(*budget, netdev->quota); | 1316 | struct net_device *netdev = adapter->netdev; |
1312 | int done = 1; | 1317 | int done = 1; |
1313 | int ctx; | 1318 | int ctx; |
1314 | int this_work_done; | 1319 | int work_done; |
1315 | int work_done = 0; | ||
1316 | 1320 | ||
1317 | DPRINTK(INFO, "polling for %d descriptors\n", *budget); | 1321 | DPRINTK(INFO, "polling for %d descriptors\n", *budget); |
1318 | 1322 | ||
@@ -1330,16 +1334,11 @@ static int netxen_nic_poll(struct net_device *netdev, int *budget) | |||
1330 | * packets are on one context, it gets only half of the quota, | 1334 | * packets are on one context, it gets only half of the quota, |
1331 | * and ends up not processing it. | 1335 | * and ends up not processing it. |
1332 | */ | 1336 | */ |
1333 | this_work_done = netxen_process_rcv_ring(adapter, ctx, | 1337 | work_done += netxen_process_rcv_ring(adapter, ctx, |
1334 | work_to_do / | 1338 | budget / MAX_RCV_CTX); |
1335 | MAX_RCV_CTX); | ||
1336 | work_done += this_work_done; | ||
1337 | } | 1339 | } |
1338 | 1340 | ||
1339 | netdev->quota -= work_done; | 1341 | if (work_done >= budget && netxen_nic_rx_has_work(adapter) != 0) |
1340 | *budget -= work_done; | ||
1341 | |||
1342 | if (work_done >= work_to_do && netxen_nic_rx_has_work(adapter) != 0) | ||
1343 | done = 0; | 1342 | done = 0; |
1344 | 1343 | ||
1345 | if (netxen_process_cmd_ring((unsigned long)adapter) == 0) | 1344 | if (netxen_process_cmd_ring((unsigned long)adapter) == 0) |
@@ -1348,11 +1347,11 @@ static int netxen_nic_poll(struct net_device *netdev, int *budget) | |||
1348 | DPRINTK(INFO, "new work_done: %d work_to_do: %d\n", | 1347 | DPRINTK(INFO, "new work_done: %d work_to_do: %d\n", |
1349 | work_done, work_to_do); | 1348 | work_done, work_to_do); |
1350 | if (done) { | 1349 | if (done) { |
1351 | netif_rx_complete(netdev); | 1350 | netif_rx_complete(netdev, napi); |
1352 | netxen_nic_enable_int(adapter); | 1351 | netxen_nic_enable_int(adapter); |
1353 | } | 1352 | } |
1354 | 1353 | ||
1355 | return !done; | 1354 | return work_done; |
1356 | } | 1355 | } |
1357 | 1356 | ||
1358 | #ifdef CONFIG_NET_POLL_CONTROLLER | 1357 | #ifdef CONFIG_NET_POLL_CONTROLLER |
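netxen drives several receive contexts from a single napi_struct and splits the budget evenly between them; whether to leave the poll list then depends on a hardware has-work test as well as the count. A sketch with hypothetical foo_* names that, slightly more conservatively than netxen itself, completes only when the budget was not exhausted and no ring still has backlog:

#include <linux/netdevice.h>

#define FOO_NUM_RINGS	4

struct foo_adapter {
	struct net_device *netdev;
	struct napi_struct napi;
};

extern int foo_clean_ring(struct foo_adapter *adapter, int ring, int budget);
extern int foo_rx_has_work(struct foo_adapter *adapter);
extern void foo_enable_irq(struct foo_adapter *adapter);

static int foo_poll(struct napi_struct *napi, int budget)
{
	struct foo_adapter *adapter = container_of(napi, struct foo_adapter, napi);
	int ring, work_done = 0;

	/* each ring gets an equal slice of the budget */
	for (ring = 0; ring < FOO_NUM_RINGS; ring++)
		work_done += foo_clean_ring(adapter, ring,
					    budget / FOO_NUM_RINGS);

	if (work_done < budget && !foo_rx_has_work(adapter)) {
		netif_rx_complete(adapter->netdev, napi);
		foo_enable_irq(adapter);
	}
	return work_done;
}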
diff --git a/drivers/net/pasemi_mac.c b/drivers/net/pasemi_mac.c index 0b3066a6fe40..e63cc335a4ba 100644 --- a/drivers/net/pasemi_mac.c +++ b/drivers/net/pasemi_mac.c | |||
@@ -584,7 +584,7 @@ static irqreturn_t pasemi_mac_rx_intr(int irq, void *data) | |||
584 | if (*mac->rx_status & PAS_STATUS_TIMER) | 584 | if (*mac->rx_status & PAS_STATUS_TIMER) |
585 | reg |= PAS_IOB_DMA_RXCH_RESET_TINTC; | 585 | reg |= PAS_IOB_DMA_RXCH_RESET_TINTC; |
586 | 586 | ||
587 | netif_rx_schedule(dev); | 587 | netif_rx_schedule(dev, &mac->napi); |
588 | 588 | ||
589 | pci_write_config_dword(mac->iob_pdev, | 589 | pci_write_config_dword(mac->iob_pdev, |
590 | PAS_IOB_DMA_RXCH_RESET(mac->dma_rxch), reg); | 590 | PAS_IOB_DMA_RXCH_RESET(mac->dma_rxch), reg); |
@@ -808,7 +808,7 @@ static int pasemi_mac_open(struct net_device *dev) | |||
808 | dev_warn(&mac->pdev->dev, "phy init failed: %d\n", ret); | 808 | dev_warn(&mac->pdev->dev, "phy init failed: %d\n", ret); |
809 | 809 | ||
810 | netif_start_queue(dev); | 810 | netif_start_queue(dev); |
811 | netif_poll_enable(dev); | 811 | napi_enable(&mac->napi); |
812 | 812 | ||
813 | /* Interrupts are a bit different for our DMA controller: While | 813 | /* Interrupts are a bit different for our DMA controller: While |
814 | * it's got one a regular PCI device header, the interrupt there | 814 | * it's got one a regular PCI device header, the interrupt there |
@@ -845,7 +845,7 @@ static int pasemi_mac_open(struct net_device *dev) | |||
845 | out_rx_int: | 845 | out_rx_int: |
846 | free_irq(mac->tx_irq, dev); | 846 | free_irq(mac->tx_irq, dev); |
847 | out_tx_int: | 847 | out_tx_int: |
848 | netif_poll_disable(dev); | 848 | napi_disable(&mac->napi); |
849 | netif_stop_queue(dev); | 849 | netif_stop_queue(dev); |
850 | pasemi_mac_free_tx_resources(dev); | 850 | pasemi_mac_free_tx_resources(dev); |
851 | out_tx_resources: | 851 | out_tx_resources: |
@@ -869,6 +869,7 @@ static int pasemi_mac_close(struct net_device *dev) | |||
869 | } | 869 | } |
870 | 870 | ||
871 | netif_stop_queue(dev); | 871 | netif_stop_queue(dev); |
872 | napi_disable(&mac->napi); | ||
872 | 873 | ||
873 | /* Clean out any pending buffers */ | 874 | /* Clean out any pending buffers */ |
874 | pasemi_mac_clean_tx(mac); | 875 | pasemi_mac_clean_tx(mac); |
@@ -1047,26 +1048,20 @@ static void pasemi_mac_set_rx_mode(struct net_device *dev) | |||
1047 | } | 1048 | } |
1048 | 1049 | ||
1049 | 1050 | ||
1050 | static int pasemi_mac_poll(struct net_device *dev, int *budget) | 1051 | static int pasemi_mac_poll(struct napi_struct *napi, int budget) |
1051 | { | 1052 | { |
1052 | int pkts, limit = min(*budget, dev->quota); | 1053 | struct pasemi_mac *mac = container_of(napi, struct pasemi_mac, napi); |
1053 | struct pasemi_mac *mac = netdev_priv(dev); | 1054 | struct net_device *dev = mac->netdev; |
1054 | 1055 | int pkts; | |
1055 | pkts = pasemi_mac_clean_rx(mac, limit); | ||
1056 | 1056 | ||
1057 | dev->quota -= pkts; | 1057 | pkts = pasemi_mac_clean_rx(mac, budget); |
1058 | *budget -= pkts; | 1058 | if (pkts < budget) { |
1059 | |||
1060 | if (pkts < limit) { | ||
1061 | /* all done, no more packets present */ | 1059 | /* all done, no more packets present */ |
1062 | netif_rx_complete(dev); | 1060 | netif_rx_complete(dev, napi); |
1063 | 1061 | ||
1064 | pasemi_mac_restart_rx_intr(mac); | 1062 | pasemi_mac_restart_rx_intr(mac); |
1065 | return 0; | ||
1066 | } else { | ||
1067 | /* used up our quantum, so reschedule */ | ||
1068 | return 1; | ||
1069 | } | 1063 | } |
1064 | return pkts; | ||
1070 | } | 1065 | } |
1071 | 1066 | ||
1072 | static int __devinit | 1067 | static int __devinit |
@@ -1099,6 +1094,10 @@ pasemi_mac_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
1099 | mac->netdev = dev; | 1094 | mac->netdev = dev; |
1100 | mac->dma_pdev = pci_get_device(PCI_VENDOR_ID_PASEMI, 0xa007, NULL); | 1095 | mac->dma_pdev = pci_get_device(PCI_VENDOR_ID_PASEMI, 0xa007, NULL); |
1101 | 1096 | ||
1097 | netif_napi_add(dev, &mac->napi, pasemi_mac_poll, 64); | ||
1098 | |||
1099 | dev->features = NETIF_F_HW_CSUM; | ||
1100 | |||
1102 | if (!mac->dma_pdev) { | 1101 | if (!mac->dma_pdev) { |
1103 | dev_err(&pdev->dev, "Can't find DMA Controller\n"); | 1102 | dev_err(&pdev->dev, "Can't find DMA Controller\n"); |
1104 | err = -ENODEV; | 1103 | err = -ENODEV; |
@@ -1150,9 +1149,6 @@ pasemi_mac_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
1150 | dev->hard_start_xmit = pasemi_mac_start_tx; | 1149 | dev->hard_start_xmit = pasemi_mac_start_tx; |
1151 | dev->get_stats = pasemi_mac_get_stats; | 1150 | dev->get_stats = pasemi_mac_get_stats; |
1152 | dev->set_multicast_list = pasemi_mac_set_rx_mode; | 1151 | dev->set_multicast_list = pasemi_mac_set_rx_mode; |
1153 | dev->weight = 64; | ||
1154 | dev->poll = pasemi_mac_poll; | ||
1155 | dev->features = NETIF_F_HW_CSUM; | ||
1156 | 1152 | ||
1157 | /* The dma status structure is located in the I/O bridge, and | 1153 | /* The dma status structure is located in the I/O bridge, and |
1158 | * is cache coherent. | 1154 | * is cache coherent. |
diff --git a/drivers/net/pasemi_mac.h b/drivers/net/pasemi_mac.h index c29ee159c33d..85d3b7856e5f 100644 --- a/drivers/net/pasemi_mac.h +++ b/drivers/net/pasemi_mac.h | |||
@@ -56,6 +56,7 @@ struct pasemi_mac { | |||
56 | struct pci_dev *dma_pdev; | 56 | struct pci_dev *dma_pdev; |
57 | struct pci_dev *iob_pdev; | 57 | struct pci_dev *iob_pdev; |
58 | struct phy_device *phydev; | 58 | struct phy_device *phydev; |
59 | struct napi_struct napi; | ||
59 | struct net_device_stats stats; | 60 | struct net_device_stats stats; |
60 | 61 | ||
61 | /* Pointer to the cacheable per-channel status registers */ | 62 | /* Pointer to the cacheable per-channel status registers */ |
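Taken together, the open()/close() changes across these drivers follow one ordering rule: napi_enable() must run before the first interrupt that could schedule the instance, napi_disable() must run before the rings are torn down, and every failure path in open() has to undo the enable, as pasemi's out_tx_int unwinding does above. Condensed sketch, hypothetical foo_* helpers:

#include <linux/netdevice.h>
#include <linux/interrupt.h>

struct foo_priv {
	struct napi_struct napi;
};

extern irqreturn_t foo_interrupt(int irq, void *dev_id);
extern void foo_hw_start(struct foo_priv *fp);
extern void foo_hw_stop(struct foo_priv *fp);

static int foo_open(struct net_device *dev)
{
	struct foo_priv *fp = netdev_priv(dev);
	int err;

	napi_enable(&fp->napi);		/* before any IRQ can fire */

	err = request_irq(dev->irq, foo_interrupt, 0, dev->name, dev);
	if (err) {
		napi_disable(&fp->napi);	/* unwind on failure */
		return err;
	}

	foo_hw_start(fp);		/* placeholder: start DMA, unmask IRQs */
	netif_start_queue(dev);
	return 0;
}

static int foo_close(struct net_device *dev)
{
	struct foo_priv *fp = netdev_priv(dev);

	netif_stop_queue(dev);
	napi_disable(&fp->napi);	/* blocks until any running poll finishes */
	foo_hw_stop(fp);
	free_irq(dev->irq, dev);
	return 0;
}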
diff --git a/drivers/net/pcnet32.c b/drivers/net/pcnet32.c index e6a67531de99..a9973490dba9 100644 --- a/drivers/net/pcnet32.c +++ b/drivers/net/pcnet32.c | |||
@@ -280,6 +280,8 @@ struct pcnet32_private { | |||
280 | unsigned int dirty_rx, /* ring entries to be freed. */ | 280 | unsigned int dirty_rx, /* ring entries to be freed. */ |
281 | dirty_tx; | 281 | dirty_tx; |
282 | 282 | ||
283 | struct net_device *dev; | ||
284 | struct napi_struct napi; | ||
283 | struct net_device_stats stats; | 285 | struct net_device_stats stats; |
284 | char tx_full; | 286 | char tx_full; |
285 | char phycount; /* number of phys found */ | 287 | char phycount; /* number of phys found */ |
@@ -440,15 +442,21 @@ static struct pcnet32_access pcnet32_dwio = { | |||
440 | 442 | ||
441 | static void pcnet32_netif_stop(struct net_device *dev) | 443 | static void pcnet32_netif_stop(struct net_device *dev) |
442 | { | 444 | { |
445 | struct pcnet32_private *lp = netdev_priv(dev); | ||
443 | dev->trans_start = jiffies; | 446 | dev->trans_start = jiffies; |
444 | netif_poll_disable(dev); | 447 | #ifdef CONFIG_PCNET32_NAPI |
448 | napi_disable(&lp->napi); | ||
449 | #endif | ||
445 | netif_tx_disable(dev); | 450 | netif_tx_disable(dev); |
446 | } | 451 | } |
447 | 452 | ||
448 | static void pcnet32_netif_start(struct net_device *dev) | 453 | static void pcnet32_netif_start(struct net_device *dev) |
449 | { | 454 | { |
455 | struct pcnet32_private *lp = netdev_priv(dev); | ||
450 | netif_wake_queue(dev); | 456 | netif_wake_queue(dev); |
451 | netif_poll_enable(dev); | 457 | #ifdef CONFIG_PCNET32_NAPI |
458 | napi_enable(&lp->napi); | ||
459 | #endif | ||
452 | } | 460 | } |
453 | 461 | ||
454 | /* | 462 | /* |
@@ -816,7 +824,7 @@ static int pcnet32_set_ringparam(struct net_device *dev, | |||
816 | if ((1 << i) != lp->rx_ring_size) | 824 | if ((1 << i) != lp->rx_ring_size) |
817 | pcnet32_realloc_rx_ring(dev, lp, i); | 825 | pcnet32_realloc_rx_ring(dev, lp, i); |
818 | 826 | ||
819 | dev->weight = lp->rx_ring_size / 2; | 827 | lp->napi.weight = lp->rx_ring_size / 2; |
820 | 828 | ||
821 | if (netif_running(dev)) { | 829 | if (netif_running(dev)) { |
822 | pcnet32_netif_start(dev); | 830 | pcnet32_netif_start(dev); |
@@ -1255,7 +1263,7 @@ static void pcnet32_rx_entry(struct net_device *dev, | |||
1255 | return; | 1263 | return; |
1256 | } | 1264 | } |
1257 | 1265 | ||
1258 | static int pcnet32_rx(struct net_device *dev, int quota) | 1266 | static int pcnet32_rx(struct net_device *dev, int budget) |
1259 | { | 1267 | { |
1260 | struct pcnet32_private *lp = netdev_priv(dev); | 1268 | struct pcnet32_private *lp = netdev_priv(dev); |
1261 | int entry = lp->cur_rx & lp->rx_mod_mask; | 1269 | int entry = lp->cur_rx & lp->rx_mod_mask; |
@@ -1263,7 +1271,7 @@ static int pcnet32_rx(struct net_device *dev, int quota) | |||
1263 | int npackets = 0; | 1271 | int npackets = 0; |
1264 | 1272 | ||
1265 | /* If we own the next entry, it's a new packet. Send it up. */ | 1273 | /* If we own the next entry, it's a new packet. Send it up. */ |
1266 | while (quota > npackets && (short)le16_to_cpu(rxp->status) >= 0) { | 1274 | while (npackets < budget && (short)le16_to_cpu(rxp->status) >= 0) { |
1267 | pcnet32_rx_entry(dev, lp, rxp, entry); | 1275 | pcnet32_rx_entry(dev, lp, rxp, entry); |
1268 | npackets += 1; | 1276 | npackets += 1; |
1269 | /* | 1277 | /* |
@@ -1379,15 +1387,16 @@ static int pcnet32_tx(struct net_device *dev) | |||
1379 | } | 1387 | } |
1380 | 1388 | ||
1381 | #ifdef CONFIG_PCNET32_NAPI | 1389 | #ifdef CONFIG_PCNET32_NAPI |
1382 | static int pcnet32_poll(struct net_device *dev, int *budget) | 1390 | static int pcnet32_poll(struct napi_struct *napi, int budget) |
1383 | { | 1391 | { |
1384 | struct pcnet32_private *lp = netdev_priv(dev); | 1392 | struct pcnet32_private *lp = container_of(napi, struct pcnet32_private, napi); |
1385 | int quota = min(dev->quota, *budget); | 1393 | struct net_device *dev = lp->dev; |
1386 | unsigned long ioaddr = dev->base_addr; | 1394 | unsigned long ioaddr = dev->base_addr; |
1387 | unsigned long flags; | 1395 | unsigned long flags; |
1396 | int work_done; | ||
1388 | u16 val; | 1397 | u16 val; |
1389 | 1398 | ||
1390 | quota = pcnet32_rx(dev, quota); | 1399 | work_done = pcnet32_rx(dev, budget); |
1391 | 1400 | ||
1392 | spin_lock_irqsave(&lp->lock, flags); | 1401 | spin_lock_irqsave(&lp->lock, flags); |
1393 | if (pcnet32_tx(dev)) { | 1402 | if (pcnet32_tx(dev)) { |
@@ -1399,28 +1408,22 @@ static int pcnet32_poll(struct net_device *dev, int *budget) | |||
1399 | } | 1408 | } |
1400 | spin_unlock_irqrestore(&lp->lock, flags); | 1409 | spin_unlock_irqrestore(&lp->lock, flags); |
1401 | 1410 | ||
1402 | *budget -= quota; | 1411 | if (work_done < budget) { |
1403 | dev->quota -= quota; | 1412 | spin_lock_irqsave(&lp->lock, flags); |
1404 | |||
1405 | if (dev->quota == 0) { | ||
1406 | return 1; | ||
1407 | } | ||
1408 | |||
1409 | netif_rx_complete(dev); | ||
1410 | |||
1411 | spin_lock_irqsave(&lp->lock, flags); | ||
1412 | 1413 | ||
1413 | /* clear interrupt masks */ | 1414 | __netif_rx_complete(dev, napi); |
1414 | val = lp->a.read_csr(ioaddr, CSR3); | ||
1415 | val &= 0x00ff; | ||
1416 | lp->a.write_csr(ioaddr, CSR3, val); | ||
1417 | 1415 | ||
1418 | /* Set interrupt enable. */ | 1416 | /* clear interrupt masks */ |
1419 | lp->a.write_csr(ioaddr, CSR0, CSR0_INTEN); | 1417 | val = lp->a.read_csr(ioaddr, CSR3); |
1420 | mmiowb(); | 1418 | val &= 0x00ff; |
1421 | spin_unlock_irqrestore(&lp->lock, flags); | 1419 | lp->a.write_csr(ioaddr, CSR3, val); |
1422 | 1420 | ||
1423 | return 0; | 1421 | /* Set interrupt enable. */ |
1422 | lp->a.write_csr(ioaddr, CSR0, CSR0_INTEN); | ||
1423 | mmiowb(); | ||
1424 | spin_unlock_irqrestore(&lp->lock, flags); | ||
1425 | } | ||
1426 | return work_done; | ||
1424 | } | 1427 | } |
1425 | #endif | 1428 | #endif |
1426 | 1429 | ||
@@ -1815,6 +1818,8 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev) | |||
1815 | } | 1818 | } |
1816 | lp->pci_dev = pdev; | 1819 | lp->pci_dev = pdev; |
1817 | 1820 | ||
1821 | lp->dev = dev; | ||
1822 | |||
1818 | spin_lock_init(&lp->lock); | 1823 | spin_lock_init(&lp->lock); |
1819 | 1824 | ||
1820 | SET_MODULE_OWNER(dev); | 1825 | SET_MODULE_OWNER(dev); |
@@ -1843,6 +1848,10 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev) | |||
1843 | lp->mii_if.mdio_read = mdio_read; | 1848 | lp->mii_if.mdio_read = mdio_read; |
1844 | lp->mii_if.mdio_write = mdio_write; | 1849 | lp->mii_if.mdio_write = mdio_write; |
1845 | 1850 | ||
1851 | #ifdef CONFIG_PCNET32_NAPI | ||
1852 | netif_napi_add(dev, &lp->napi, pcnet32_poll, lp->rx_ring_size / 2); | ||
1853 | #endif | ||
1854 | |||
1846 | if (fdx && !(lp->options & PCNET32_PORT_ASEL) && | 1855 | if (fdx && !(lp->options & PCNET32_PORT_ASEL) && |
1847 | ((cards_found >= MAX_UNITS) || full_duplex[cards_found])) | 1856 | ((cards_found >= MAX_UNITS) || full_duplex[cards_found])) |
1848 | lp->options |= PCNET32_PORT_FD; | 1857 | lp->options |= PCNET32_PORT_FD; |
@@ -1953,10 +1962,6 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev) | |||
1953 | dev->ethtool_ops = &pcnet32_ethtool_ops; | 1962 | dev->ethtool_ops = &pcnet32_ethtool_ops; |
1954 | dev->tx_timeout = pcnet32_tx_timeout; | 1963 | dev->tx_timeout = pcnet32_tx_timeout; |
1955 | dev->watchdog_timeo = (5 * HZ); | 1964 | dev->watchdog_timeo = (5 * HZ); |
1956 | dev->weight = lp->rx_ring_size / 2; | ||
1957 | #ifdef CONFIG_PCNET32_NAPI | ||
1958 | dev->poll = pcnet32_poll; | ||
1959 | #endif | ||
1960 | 1965 | ||
1961 | #ifdef CONFIG_NET_POLL_CONTROLLER | 1966 | #ifdef CONFIG_NET_POLL_CONTROLLER |
1962 | dev->poll_controller = pcnet32_poll_controller; | 1967 | dev->poll_controller = pcnet32_poll_controller; |
@@ -2276,6 +2281,10 @@ static int pcnet32_open(struct net_device *dev) | |||
2276 | goto err_free_ring; | 2281 | goto err_free_ring; |
2277 | } | 2282 | } |
2278 | 2283 | ||
2284 | #ifdef CONFIG_PCNET32_NAPI | ||
2285 | napi_enable(&lp->napi); | ||
2286 | #endif | ||
2287 | |||
2279 | /* Re-initialize the PCNET32, and start it when done. */ | 2288 | /* Re-initialize the PCNET32, and start it when done. */ |
2280 | lp->a.write_csr(ioaddr, 1, (lp->init_dma_addr & 0xffff)); | 2289 | lp->a.write_csr(ioaddr, 1, (lp->init_dma_addr & 0xffff)); |
2281 | lp->a.write_csr(ioaddr, 2, (lp->init_dma_addr >> 16)); | 2290 | lp->a.write_csr(ioaddr, 2, (lp->init_dma_addr >> 16)); |
@@ -2599,18 +2608,18 @@ pcnet32_interrupt(int irq, void *dev_id) | |||
2599 | /* unlike for the lance, there is no restart needed */ | 2608 | /* unlike for the lance, there is no restart needed */ |
2600 | } | 2609 | } |
2601 | #ifdef CONFIG_PCNET32_NAPI | 2610 | #ifdef CONFIG_PCNET32_NAPI |
2602 | if (netif_rx_schedule_prep(dev)) { | 2611 | if (netif_rx_schedule_prep(dev, &lp->napi)) { |
2603 | u16 val; | 2612 | u16 val; |
2604 | /* set interrupt masks */ | 2613 | /* set interrupt masks */ |
2605 | val = lp->a.read_csr(ioaddr, CSR3); | 2614 | val = lp->a.read_csr(ioaddr, CSR3); |
2606 | val |= 0x5f00; | 2615 | val |= 0x5f00; |
2607 | lp->a.write_csr(ioaddr, CSR3, val); | 2616 | lp->a.write_csr(ioaddr, CSR3, val); |
2608 | mmiowb(); | 2617 | mmiowb(); |
2609 | __netif_rx_schedule(dev); | 2618 | __netif_rx_schedule(dev, &lp->napi); |
2610 | break; | 2619 | break; |
2611 | } | 2620 | } |
2612 | #else | 2621 | #else |
2613 | pcnet32_rx(dev, dev->weight); | 2622 | pcnet32_rx(dev, lp->napi.weight); |
2614 | if (pcnet32_tx(dev)) { | 2623 | if (pcnet32_tx(dev)) { |
2615 | /* reset the chip to clear the error condition, then restart */ | 2624 | /* reset the chip to clear the error condition, then restart */ |
2616 | lp->a.reset(ioaddr); | 2625 | lp->a.reset(ioaddr); |
@@ -2645,6 +2654,9 @@ static int pcnet32_close(struct net_device *dev) | |||
2645 | del_timer_sync(&lp->watchdog_timer); | 2654 | del_timer_sync(&lp->watchdog_timer); |
2646 | 2655 | ||
2647 | netif_stop_queue(dev); | 2656 | netif_stop_queue(dev); |
2657 | #ifdef CONFIG_PCNET32_NAPI | ||
2658 | napi_disable(&lp->napi); | ||
2659 | #endif | ||
2648 | 2660 | ||
2649 | spin_lock_irqsave(&lp->lock, flags); | 2661 | spin_lock_irqsave(&lp->lock, flags); |
2650 | 2662 | ||
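The pcnet32 hunks above show the shape that every conversion in this
series converges on.  A minimal sketch of the new-style poll callback,
written against a hypothetical driver "foo" rather than lifted from the
patch:

    struct foo_priv {
            struct net_device *dev;
            struct napi_struct napi;
            /* ... ring state ... */
    };

    static int foo_poll(struct napi_struct *napi, int budget)
    {
            struct foo_priv *fp = container_of(napi, struct foo_priv, napi);
            struct net_device *dev = fp->dev;
            int work_done;

            /* Process at most `budget' rx packets. */
            work_done = foo_clean_rx_ring(fp, budget);

            if (work_done < budget) {
                    /* Ring drained: leave polled mode and unmask the
                     * rx interrupt (foo_unmask_rx_irq() stands in for
                     * the CSR0/CSR3 writes pcnet32 does above). */
                    netif_rx_complete(dev, napi);
                    foo_unmask_rx_irq(fp);
            }
            return work_done;
    }

Returning the work count, instead of the old 0-done/1-not-done flag,
moves the "*budget -= ..." and "dev->quota -= ..." bookkeeping out of
every driver and into the core.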
diff --git a/drivers/net/ps3_gelic_net.c b/drivers/net/ps3_gelic_net.c index e56503918436..92561c0450bc 100644 --- a/drivers/net/ps3_gelic_net.c +++ b/drivers/net/ps3_gelic_net.c | |||
@@ -556,6 +556,7 @@ static int gelic_net_stop(struct net_device *netdev) | |||
556 | { | 556 | { |
557 | struct gelic_net_card *card = netdev_priv(netdev); | 557 | struct gelic_net_card *card = netdev_priv(netdev); |
558 | 558 | ||
559 | napi_disable(&card->napi); | ||
559 | netif_stop_queue(netdev); | 560 | netif_stop_queue(netdev); |
560 | 561 | ||
561 | /* turn off DMA, force end */ | 562 | /* turn off DMA, force end */ |
@@ -987,32 +988,24 @@ refill: | |||
987 | * if the quota is exceeded but the driver still has packets. | 988 | * if the quota is exceeded but the driver still has packets. |
988 | * | 989 | * |
989 | */ | 990 | */ |
990 | static int gelic_net_poll(struct net_device *netdev, int *budget) | 991 | static int gelic_net_poll(struct napi_struct *napi, int budget) |
991 | { | 992 | { |
992 | struct gelic_net_card *card = netdev_priv(netdev); | 993 | struct gelic_net_card *card = container_of(napi, struct gelic_net_card, napi); |
993 | int packets_to_do, packets_done = 0; | 994 | struct net_device *netdev = card->netdev; |
994 | int no_more_packets = 0; | 995 | int packets_done = 0; |
995 | |||
996 | packets_to_do = min(*budget, netdev->quota); | ||
997 | 996 | ||
998 | while (packets_to_do) { | 997 | while (packets_done < budget) { |
999 | if (gelic_net_decode_one_descr(card)) { | 998 | if (!gelic_net_decode_one_descr(card)) |
1000 | packets_done++; | ||
1001 | packets_to_do--; | ||
1002 | } else { | ||
1003 | /* no more packets for the stack */ | ||
1004 | no_more_packets = 1; | ||
1005 | break; | 999 | break; |
1006 | } | 1000 | |
1001 | packets_done++; | ||
1007 | } | 1002 | } |
1008 | netdev->quota -= packets_done; | 1003 | |
1009 | *budget -= packets_done; | 1004 | if (packets_done < budget) { |
1010 | if (no_more_packets) { | 1005 | netif_rx_complete(netdev, napi); |
1011 | netif_rx_complete(netdev); | ||
1012 | gelic_net_rx_irq_on(card); | 1006 | gelic_net_rx_irq_on(card); |
1013 | return 0; | 1007 | } |
1014 | } else | 1008 | return packets_done; |
1015 | return 1; | ||
1016 | } | 1009 | } |
1017 | /** | 1010 | /** |
1018 | * gelic_net_change_mtu - changes the MTU of an interface | 1011 | * gelic_net_change_mtu - changes the MTU of an interface |
@@ -1055,7 +1048,7 @@ static irqreturn_t gelic_net_interrupt(int irq, void *ptr) | |||
1055 | 1048 | ||
1056 | if (status & GELIC_NET_RXINT) { | 1049 | if (status & GELIC_NET_RXINT) { |
1057 | gelic_net_rx_irq_off(card); | 1050 | gelic_net_rx_irq_off(card); |
1058 | netif_rx_schedule(netdev); | 1051 | netif_rx_schedule(netdev, &card->napi); |
1059 | } | 1052 | } |
1060 | 1053 | ||
1061 | if (status & GELIC_NET_TXINT) { | 1054 | if (status & GELIC_NET_TXINT) { |
@@ -1159,6 +1152,8 @@ static int gelic_net_open(struct net_device *netdev) | |||
1159 | if (gelic_net_alloc_rx_skbs(card)) | 1152 | if (gelic_net_alloc_rx_skbs(card)) |
1160 | goto alloc_skbs_failed; | 1153 | goto alloc_skbs_failed; |
1161 | 1154 | ||
1155 | napi_enable(&card->napi); | ||
1156 | |||
1162 | card->tx_dma_progress = 0; | 1157 | card->tx_dma_progress = 0; |
1163 | card->ghiintmask = GELIC_NET_RXINT | GELIC_NET_TXINT; | 1158 | card->ghiintmask = GELIC_NET_RXINT | GELIC_NET_TXINT; |
1164 | 1159 | ||
@@ -1360,9 +1355,6 @@ static void gelic_net_setup_netdev_ops(struct net_device *netdev) | |||
1360 | /* tx watchdog */ | 1355 | /* tx watchdog */ |
1361 | netdev->tx_timeout = &gelic_net_tx_timeout; | 1356 | netdev->tx_timeout = &gelic_net_tx_timeout; |
1362 | netdev->watchdog_timeo = GELIC_NET_WATCHDOG_TIMEOUT; | 1357 | netdev->watchdog_timeo = GELIC_NET_WATCHDOG_TIMEOUT; |
1363 | /* NAPI */ | ||
1364 | netdev->poll = &gelic_net_poll; | ||
1365 | netdev->weight = GELIC_NET_NAPI_WEIGHT; | ||
1366 | netdev->ethtool_ops = &gelic_net_ethtool_ops; | 1358 | netdev->ethtool_ops = &gelic_net_ethtool_ops; |
1367 | } | 1359 | } |
1368 | 1360 | ||
@@ -1390,6 +1382,9 @@ static int gelic_net_setup_netdev(struct gelic_net_card *card) | |||
1390 | 1382 | ||
1391 | gelic_net_setup_netdev_ops(netdev); | 1383 | gelic_net_setup_netdev_ops(netdev); |
1392 | 1384 | ||
1385 | netif_napi_add(netdev, &card->napi, | ||
1386 | gelic_net_poll, GELIC_NET_NAPI_WEIGHT); | ||
1387 | |||
1393 | netdev->features = NETIF_F_IP_CSUM; | 1388 | netdev->features = NETIF_F_IP_CSUM; |
1394 | 1389 | ||
1395 | status = lv1_net_control(bus_id(card), dev_id(card), | 1390 | status = lv1_net_control(bus_id(card), dev_id(card), |
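With dev->poll and dev->weight gone from struct net_device, setup and
lifecycle become explicit calls, as the gelic hunks above show.  The
same pattern sketched with hypothetical "foo" names:

    /* probe/setup: register the poll routine and its weight once */
    netif_napi_add(netdev, &priv->napi, foo_poll, FOO_NAPI_WEIGHT);

    static int foo_open(struct net_device *netdev)
    {
            struct foo_priv *priv = netdev_priv(netdev);

            /* ... allocate rings and rx buffers ... */
            napi_enable(&priv->napi);       /* before rx irqs can fire */
            /* ... start DMA, enable interrupts ... */
            return 0;
    }

    static int foo_stop(struct net_device *netdev)
    {
            struct foo_priv *priv = netdev_priv(netdev);

            /* waits for any in-flight poll to finish */
            napi_disable(&priv->napi);
            netif_stop_queue(netdev);
            /* ... stop DMA, free buffers ... */
            return 0;
    }

Note that gelic_net_stop() now calls napi_disable() first, before any
teardown: once it returns, no poll can be running against the rings.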
diff --git a/drivers/net/ps3_gelic_net.h b/drivers/net/ps3_gelic_net.h index a9c4c4fc2547..968560269a3b 100644 --- a/drivers/net/ps3_gelic_net.h +++ b/drivers/net/ps3_gelic_net.h | |||
@@ -194,6 +194,7 @@ struct gelic_net_descr_chain { | |||
194 | 194 | ||
195 | struct gelic_net_card { | 195 | struct gelic_net_card { |
196 | struct net_device *netdev; | 196 | struct net_device *netdev; |
197 | struct napi_struct napi; | ||
197 | /* | 198 | /* |
198 | * hypervisor requires irq_status should be | 199 | * hypervisor requires irq_status should be |
199 | * 8 bytes aligned, but u64 member is | 200 | * 8 bytes aligned, but u64 member is |
diff --git a/drivers/net/qla3xxx.c b/drivers/net/qla3xxx.c index ea151315050c..bf9f8f64ba67 100755 --- a/drivers/net/qla3xxx.c +++ b/drivers/net/qla3xxx.c | |||
@@ -2310,10 +2310,10 @@ static int ql_tx_rx_clean(struct ql3_adapter *qdev, | |||
2310 | return work_done; | 2310 | return work_done; |
2311 | } | 2311 | } |
2312 | 2312 | ||
2313 | static int ql_poll(struct net_device *ndev, int *budget) | 2313 | static int ql_poll(struct napi_struct *napi, int budget) |
2314 | { | 2314 | { |
2315 | struct ql3_adapter *qdev = netdev_priv(ndev); | 2315 | struct ql3_adapter *qdev = container_of(napi, struct ql3_adapter, napi); |
2316 | int work_to_do = min(*budget, ndev->quota); | 2316 | struct net_device *ndev = qdev->ndev; |
2317 | int rx_cleaned = 0, tx_cleaned = 0; | 2317 | int rx_cleaned = 0, tx_cleaned = 0; |
2318 | unsigned long hw_flags; | 2318 | unsigned long hw_flags; |
2319 | struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers; | 2319 | struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers; |
@@ -2321,16 +2321,13 @@ static int ql_poll(struct net_device *ndev, int *budget) | |||
2321 | if (!netif_carrier_ok(ndev)) | 2321 | if (!netif_carrier_ok(ndev)) |
2322 | goto quit_polling; | 2322 | goto quit_polling; |
2323 | 2323 | ||
2324 | ql_tx_rx_clean(qdev, &tx_cleaned, &rx_cleaned, work_to_do); | 2324 | ql_tx_rx_clean(qdev, &tx_cleaned, &rx_cleaned, budget); |
2325 | *budget -= rx_cleaned; | ||
2326 | ndev->quota -= rx_cleaned; | ||
2327 | 2325 | ||
2328 | if( tx_cleaned + rx_cleaned != work_to_do || | 2326 | if (tx_cleaned + rx_cleaned != budget || |
2329 | !netif_running(ndev)) { | 2327 | !netif_running(ndev)) { |
2330 | quit_polling: | 2328 | quit_polling: |
2331 | netif_rx_complete(ndev); | ||
2332 | |||
2333 | spin_lock_irqsave(&qdev->hw_lock, hw_flags); | 2329 | spin_lock_irqsave(&qdev->hw_lock, hw_flags); |
2330 | __netif_rx_complete(ndev, napi); | ||
2334 | ql_update_small_bufq_prod_index(qdev); | 2331 | ql_update_small_bufq_prod_index(qdev); |
2335 | ql_update_lrg_bufq_prod_index(qdev); | 2332 | ql_update_lrg_bufq_prod_index(qdev); |
2336 | writel(qdev->rsp_consumer_index, | 2333 | writel(qdev->rsp_consumer_index, |
@@ -2338,9 +2335,8 @@ quit_polling: | |||
2338 | spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); | 2335 | spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); |
2339 | 2336 | ||
2340 | ql_enable_interrupts(qdev); | 2337 | ql_enable_interrupts(qdev); |
2341 | return 0; | ||
2342 | } | 2338 | } |
2343 | return 1; | 2339 | return tx_cleaned + rx_cleaned; |
2344 | } | 2340 | } |
2345 | 2341 | ||
2346 | static irqreturn_t ql3xxx_isr(int irq, void *dev_id) | 2342 | static irqreturn_t ql3xxx_isr(int irq, void *dev_id) |
@@ -2390,8 +2386,8 @@ static irqreturn_t ql3xxx_isr(int irq, void *dev_id) | |||
2390 | spin_unlock(&qdev->adapter_lock); | 2386 | spin_unlock(&qdev->adapter_lock); |
2391 | } else if (value & ISP_IMR_DISABLE_CMPL_INT) { | 2387 | } else if (value & ISP_IMR_DISABLE_CMPL_INT) { |
2392 | ql_disable_interrupts(qdev); | 2388 | ql_disable_interrupts(qdev); |
2393 | if (likely(netif_rx_schedule_prep(ndev))) { | 2389 | if (likely(netif_rx_schedule_prep(ndev, &qdev->napi))) { |
2394 | __netif_rx_schedule(ndev); | 2390 | __netif_rx_schedule(ndev, &qdev->napi); |
2395 | } | 2391 | } |
2396 | } else { | 2392 | } else { |
2397 | return IRQ_NONE; | 2393 | return IRQ_NONE; |
@@ -3617,7 +3613,7 @@ static int ql_adapter_down(struct ql3_adapter *qdev, int do_reset) | |||
3617 | 3613 | ||
3618 | del_timer_sync(&qdev->adapter_timer); | 3614 | del_timer_sync(&qdev->adapter_timer); |
3619 | 3615 | ||
3620 | netif_poll_disable(ndev); | 3616 | napi_disable(&qdev->napi); |
3621 | 3617 | ||
3622 | if (do_reset) { | 3618 | if (do_reset) { |
3623 | int soft_reset; | 3619 | int soft_reset; |
@@ -3705,7 +3701,7 @@ static int ql_adapter_up(struct ql3_adapter *qdev) | |||
3705 | 3701 | ||
3706 | mod_timer(&qdev->adapter_timer, jiffies + HZ * 1); | 3702 | mod_timer(&qdev->adapter_timer, jiffies + HZ * 1); |
3707 | 3703 | ||
3708 | netif_poll_enable(ndev); | 3704 | napi_enable(&qdev->napi); |
3709 | ql_enable_interrupts(qdev); | 3705 | ql_enable_interrupts(qdev); |
3710 | return 0; | 3706 | return 0; |
3711 | 3707 | ||
@@ -4061,8 +4057,7 @@ static int __devinit ql3xxx_probe(struct pci_dev *pdev, | |||
4061 | ndev->tx_timeout = ql3xxx_tx_timeout; | 4057 | ndev->tx_timeout = ql3xxx_tx_timeout; |
4062 | ndev->watchdog_timeo = 5 * HZ; | 4058 | ndev->watchdog_timeo = 5 * HZ; |
4063 | 4059 | ||
4064 | ndev->poll = &ql_poll; | 4060 | netif_napi_add(ndev, &qdev->napi, ql_poll, 64); |
4065 | ndev->weight = 64; | ||
4066 | 4061 | ||
4067 | ndev->irq = pdev->irq; | 4062 | ndev->irq = pdev->irq; |
4068 | 4063 | ||
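qla3xxx also shows the interrupt-side half of the conversion: the
scheduling primitives now take the napi_struct alongside the device.
A sketch of the two-step form (hypothetical "foo" helpers):

    static irqreturn_t foo_isr(int irq, void *dev_id)
    {
            struct foo_priv *priv = dev_id;

            if (foo_rx_pending(priv) &&
                likely(netif_rx_schedule_prep(priv->dev, &priv->napi))) {
                    /* prep succeeded, so we own the poll; mask the rx
                     * interrupt before entering polled mode */
                    foo_mask_rx_irq(priv);
                    __netif_rx_schedule(priv->dev, &priv->napi);
            }
            return IRQ_HANDLED;
    }

Drivers with nothing to do between the two steps can use the combined
netif_rx_schedule(dev, &napi), as the gelic and skge hunks do.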
diff --git a/drivers/net/qla3xxx.h b/drivers/net/qla3xxx.h index 4a832c46c274..aa2216f0d7b8 100755 --- a/drivers/net/qla3xxx.h +++ b/drivers/net/qla3xxx.h | |||
@@ -1175,6 +1175,8 @@ struct ql3_adapter { | |||
1175 | struct pci_dev *pdev; | 1175 | struct pci_dev *pdev; |
1176 | struct net_device *ndev; /* Parent NET device */ | 1176 | struct net_device *ndev; /* Parent NET device */ |
1177 | 1177 | ||
1178 | struct napi_struct napi; | ||
1179 | |||
1178 | /* Hardware information */ | 1180 | /* Hardware information */ |
1179 | u8 chip_rev_id; | 1181 | u8 chip_rev_id; |
1180 | u8 pci_slot; | 1182 | u8 pci_slot; |
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c index c76dd29c8e9a..3f2306e3f517 100644 --- a/drivers/net/r8169.c +++ b/drivers/net/r8169.c | |||
@@ -384,6 +384,7 @@ struct rtl8169_private { | |||
384 | void __iomem *mmio_addr; /* memory map physical address */ | 384 | void __iomem *mmio_addr; /* memory map physical address */ |
385 | struct pci_dev *pci_dev; /* Index of PCI device */ | 385 | struct pci_dev *pci_dev; /* Index of PCI device */ |
386 | struct net_device *dev; | 386 | struct net_device *dev; |
387 | struct napi_struct napi; | ||
387 | struct net_device_stats stats; /* statistics of net device */ | 388 | struct net_device_stats stats; /* statistics of net device */ |
388 | spinlock_t lock; /* spin lock flag */ | 389 | spinlock_t lock; /* spin lock flag */ |
389 | u32 msg_enable; | 390 | u32 msg_enable; |
@@ -443,13 +444,13 @@ static void rtl_set_rx_mode(struct net_device *dev); | |||
443 | static void rtl8169_tx_timeout(struct net_device *dev); | 444 | static void rtl8169_tx_timeout(struct net_device *dev); |
444 | static struct net_device_stats *rtl8169_get_stats(struct net_device *dev); | 445 | static struct net_device_stats *rtl8169_get_stats(struct net_device *dev); |
445 | static int rtl8169_rx_interrupt(struct net_device *, struct rtl8169_private *, | 446 | static int rtl8169_rx_interrupt(struct net_device *, struct rtl8169_private *, |
446 | void __iomem *); | 447 | void __iomem *, u32 budget); |
447 | static int rtl8169_change_mtu(struct net_device *dev, int new_mtu); | 448 | static int rtl8169_change_mtu(struct net_device *dev, int new_mtu); |
448 | static void rtl8169_down(struct net_device *dev); | 449 | static void rtl8169_down(struct net_device *dev); |
449 | static void rtl8169_rx_clear(struct rtl8169_private *tp); | 450 | static void rtl8169_rx_clear(struct rtl8169_private *tp); |
450 | 451 | ||
451 | #ifdef CONFIG_R8169_NAPI | 452 | #ifdef CONFIG_R8169_NAPI |
452 | static int rtl8169_poll(struct net_device *dev, int *budget); | 453 | static int rtl8169_poll(struct napi_struct *napi, int budget); |
453 | #endif | 454 | #endif |
454 | 455 | ||
455 | static const unsigned int rtl8169_rx_config = | 456 | static const unsigned int rtl8169_rx_config = |
@@ -1656,8 +1657,7 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
1656 | dev->set_mac_address = rtl_set_mac_address; | 1657 | dev->set_mac_address = rtl_set_mac_address; |
1657 | 1658 | ||
1658 | #ifdef CONFIG_R8169_NAPI | 1659 | #ifdef CONFIG_R8169_NAPI |
1659 | dev->poll = rtl8169_poll; | 1660 | netif_napi_add(dev, &tp->napi, rtl8169_poll, R8169_NAPI_WEIGHT); |
1660 | dev->weight = R8169_NAPI_WEIGHT; | ||
1661 | #endif | 1661 | #endif |
1662 | 1662 | ||
1663 | #ifdef CONFIG_R8169_VLAN | 1663 | #ifdef CONFIG_R8169_VLAN |
@@ -1777,6 +1777,10 @@ static int rtl8169_open(struct net_device *dev) | |||
1777 | if (retval < 0) | 1777 | if (retval < 0) |
1778 | goto err_release_ring_2; | 1778 | goto err_release_ring_2; |
1779 | 1779 | ||
1780 | #ifdef CONFIG_R8169_NAPI | ||
1781 | napi_enable(&tp->napi); | ||
1782 | #endif | ||
1783 | |||
1780 | rtl_hw_start(dev); | 1784 | rtl_hw_start(dev); |
1781 | 1785 | ||
1782 | rtl8169_request_timer(dev); | 1786 | rtl8169_request_timer(dev); |
@@ -2082,7 +2086,9 @@ static int rtl8169_change_mtu(struct net_device *dev, int new_mtu) | |||
2082 | if (ret < 0) | 2086 | if (ret < 0) |
2083 | goto out; | 2087 | goto out; |
2084 | 2088 | ||
2085 | netif_poll_enable(dev); | 2089 | #ifdef CONFIG_R8169_NAPI |
2090 | napi_enable(&tp->napi); | ||
2091 | #endif | ||
2086 | 2092 | ||
2087 | rtl_hw_start(dev); | 2093 | rtl_hw_start(dev); |
2088 | 2094 | ||
@@ -2274,11 +2280,15 @@ static void rtl8169_wait_for_quiescence(struct net_device *dev) | |||
2274 | synchronize_irq(dev->irq); | 2280 | synchronize_irq(dev->irq); |
2275 | 2281 | ||
2276 | /* Wait for any pending NAPI task to complete */ | 2282 | /* Wait for any pending NAPI task to complete */ |
2277 | netif_poll_disable(dev); | 2283 | #ifdef CONFIG_R8169_NAPI |
2284 | napi_disable(&tp->napi); | ||
2285 | #endif | ||
2278 | 2286 | ||
2279 | rtl8169_irq_mask_and_ack(ioaddr); | 2287 | rtl8169_irq_mask_and_ack(ioaddr); |
2280 | 2288 | ||
2281 | netif_poll_enable(dev); | 2289 | #ifdef CONFIG_R8169_NAPI |
2290 | napi_enable(&tp->napi); | ||
2291 | #endif | ||
2282 | } | 2292 | } |
2283 | 2293 | ||
2284 | static void rtl8169_reinit_task(struct work_struct *work) | 2294 | static void rtl8169_reinit_task(struct work_struct *work) |
@@ -2322,7 +2332,7 @@ static void rtl8169_reset_task(struct work_struct *work) | |||
2322 | 2332 | ||
2323 | rtl8169_wait_for_quiescence(dev); | 2333 | rtl8169_wait_for_quiescence(dev); |
2324 | 2334 | ||
2325 | rtl8169_rx_interrupt(dev, tp, tp->mmio_addr); | 2335 | rtl8169_rx_interrupt(dev, tp, tp->mmio_addr, ~(u32)0); |
2326 | rtl8169_tx_clear(tp); | 2336 | rtl8169_tx_clear(tp); |
2327 | 2337 | ||
2328 | if (tp->dirty_rx == tp->cur_rx) { | 2338 | if (tp->dirty_rx == tp->cur_rx) { |
@@ -2636,14 +2646,14 @@ out: | |||
2636 | 2646 | ||
2637 | static int rtl8169_rx_interrupt(struct net_device *dev, | 2647 | static int rtl8169_rx_interrupt(struct net_device *dev, |
2638 | struct rtl8169_private *tp, | 2648 | struct rtl8169_private *tp, |
2639 | void __iomem *ioaddr) | 2649 | void __iomem *ioaddr, u32 budget) |
2640 | { | 2650 | { |
2641 | unsigned int cur_rx, rx_left; | 2651 | unsigned int cur_rx, rx_left; |
2642 | unsigned int delta, count; | 2652 | unsigned int delta, count; |
2643 | 2653 | ||
2644 | cur_rx = tp->cur_rx; | 2654 | cur_rx = tp->cur_rx; |
2645 | rx_left = NUM_RX_DESC + tp->dirty_rx - cur_rx; | 2655 | rx_left = NUM_RX_DESC + tp->dirty_rx - cur_rx; |
2646 | rx_left = rtl8169_rx_quota(rx_left, (u32) dev->quota); | 2656 | rx_left = rtl8169_rx_quota(rx_left, budget); |
2647 | 2657 | ||
2648 | for (; rx_left > 0; rx_left--, cur_rx++) { | 2658 | for (; rx_left > 0; rx_left--, cur_rx++) { |
2649 | unsigned int entry = cur_rx % NUM_RX_DESC; | 2659 | unsigned int entry = cur_rx % NUM_RX_DESC; |
@@ -2792,8 +2802,8 @@ static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance) | |||
2792 | RTL_W16(IntrMask, tp->intr_event & ~tp->napi_event); | 2802 | RTL_W16(IntrMask, tp->intr_event & ~tp->napi_event); |
2793 | tp->intr_mask = ~tp->napi_event; | 2803 | tp->intr_mask = ~tp->napi_event; |
2794 | 2804 | ||
2795 | if (likely(netif_rx_schedule_prep(dev))) | 2805 | if (likely(netif_rx_schedule_prep(dev, &tp->napi))) |
2796 | __netif_rx_schedule(dev); | 2806 | __netif_rx_schedule(dev, &tp->napi); |
2797 | else if (netif_msg_intr(tp)) { | 2807 | else if (netif_msg_intr(tp)) { |
2798 | printk(KERN_INFO "%s: interrupt %04x in poll\n", | 2808 | printk(KERN_INFO "%s: interrupt %04x in poll\n", |
2799 | dev->name, status); | 2809 | dev->name, status); |
@@ -2803,7 +2813,7 @@ static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance) | |||
2803 | #else | 2813 | #else |
2804 | /* Rx interrupt */ | 2814 | /* Rx interrupt */ |
2805 | if (status & (RxOK | RxOverflow | RxFIFOOver)) | 2815 | if (status & (RxOK | RxOverflow | RxFIFOOver)) |
2806 | rtl8169_rx_interrupt(dev, tp, ioaddr); | 2816 | rtl8169_rx_interrupt(dev, tp, ioaddr, ~(u32)0); |
2807 | 2817 | ||
2808 | /* Tx interrupt */ | 2818 | /* Tx interrupt */ |
2809 | if (status & (TxOK | TxErr)) | 2819 | if (status & (TxOK | TxErr)) |
@@ -2826,20 +2836,18 @@ out: | |||
2826 | } | 2836 | } |
2827 | 2837 | ||
2828 | #ifdef CONFIG_R8169_NAPI | 2838 | #ifdef CONFIG_R8169_NAPI |
2829 | static int rtl8169_poll(struct net_device *dev, int *budget) | 2839 | static int rtl8169_poll(struct napi_struct *napi, int budget) |
2830 | { | 2840 | { |
2831 | unsigned int work_done, work_to_do = min(*budget, dev->quota); | 2841 | struct rtl8169_private *tp = container_of(napi, struct rtl8169_private, napi); |
2832 | struct rtl8169_private *tp = netdev_priv(dev); | 2842 | struct net_device *dev = tp->dev; |
2833 | void __iomem *ioaddr = tp->mmio_addr; | 2843 | void __iomem *ioaddr = tp->mmio_addr; |
2844 | int work_done; | ||
2834 | 2845 | ||
2835 | work_done = rtl8169_rx_interrupt(dev, tp, ioaddr); | 2846 | work_done = rtl8169_rx_interrupt(dev, tp, ioaddr, (u32) budget); |
2836 | rtl8169_tx_interrupt(dev, tp, ioaddr); | 2847 | rtl8169_tx_interrupt(dev, tp, ioaddr); |
2837 | 2848 | ||
2838 | *budget -= work_done; | 2849 | if (work_done < budget) { |
2839 | dev->quota -= work_done; | 2850 | netif_rx_complete(dev, napi); |
2840 | |||
2841 | if (work_done < work_to_do) { | ||
2842 | netif_rx_complete(dev); | ||
2843 | tp->intr_mask = 0xffff; | 2851 | tp->intr_mask = 0xffff; |
2844 | /* | 2852 | /* |
2845 | * 20040426: the barrier is not strictly required but the | 2853 | * 20040426: the barrier is not strictly required but the |
@@ -2851,7 +2859,7 @@ static int rtl8169_poll(struct net_device *dev, int *budget) | |||
2851 | RTL_W16(IntrMask, tp->intr_event); | 2859 | RTL_W16(IntrMask, tp->intr_event); |
2852 | } | 2860 | } |
2853 | 2861 | ||
2854 | return (work_done >= work_to_do); | 2862 | return work_done; |
2855 | } | 2863 | } |
2856 | #endif | 2864 | #endif |
2857 | 2865 | ||
@@ -2880,7 +2888,7 @@ core_down: | |||
2880 | synchronize_irq(dev->irq); | 2888 | synchronize_irq(dev->irq); |
2881 | 2889 | ||
2882 | if (!poll_locked) { | 2890 | if (!poll_locked) { |
2883 | netif_poll_disable(dev); | 2891 | napi_disable(&tp->napi); |
2884 | poll_locked++; | 2892 | poll_locked++; |
2885 | } | 2893 | } |
2886 | 2894 | ||
@@ -2918,8 +2926,6 @@ static int rtl8169_close(struct net_device *dev) | |||
2918 | 2926 | ||
2919 | free_irq(dev->irq, dev); | 2927 | free_irq(dev->irq, dev); |
2920 | 2928 | ||
2921 | netif_poll_enable(dev); | ||
2922 | |||
2923 | pci_free_consistent(pdev, R8169_RX_RING_BYTES, tp->RxDescArray, | 2929 | pci_free_consistent(pdev, R8169_RX_RING_BYTES, tp->RxDescArray, |
2924 | tp->RxPhyAddr); | 2930 | tp->RxPhyAddr); |
2925 | pci_free_consistent(pdev, R8169_TX_RING_BYTES, tp->TxDescArray, | 2931 | pci_free_consistent(pdev, R8169_TX_RING_BYTES, tp->TxDescArray, |
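r8169 keeps both NAPI and non-NAPI builds, so rtl8169_rx_interrupt()
grows an explicit budget argument in place of the old peek at
dev->quota, and the irq path passes ~(u32)0 to mean "no limit".  A
sketch of that split (hypothetical "foo"):

    /* one rx routine shared by the NAPI and irq paths */
    static int foo_rx(struct foo_priv *priv, u32 budget)
    {
            unsigned int count, rx_left = foo_ring_pending(priv);

            if (rx_left > budget)   /* ~(u32)0 effectively caps nothing */
                    rx_left = budget;
            for (count = 0; count < rx_left; count++)
                    foo_receive_one(priv);
            return count;
    }

    #ifdef CONFIG_FOO_NAPI
            work_done = foo_rx(priv, (u32)budget);  /* from foo_poll() */
    #else
            foo_rx(priv, ~(u32)0);                  /* from the irq handler */
    #endif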
diff --git a/drivers/net/s2io.c b/drivers/net/s2io.c index 24feb00600ee..dd012322cdbe 100644 --- a/drivers/net/s2io.c +++ b/drivers/net/s2io.c | |||
@@ -2568,7 +2568,7 @@ static void free_rx_buffers(struct s2io_nic *sp) | |||
2568 | 2568 | ||
2569 | /** | 2569 | /** |
2570 | * s2io_poll - Rx interrupt handler for NAPI support | 2570 | * s2io_poll - Rx interrupt handler for NAPI support |
2571 | * @dev : pointer to the device structure. | 2571 | * @napi : pointer to the napi structure. |
2572 | * @budget : The number of packets that were budgeted to be processed | 2572 | * @budget : The number of packets that were budgeted to be processed |
2573 | * during one pass through the "Poll" function. | 2573 | * during one pass through the "Poll" function. |
2574 | * Description: | 2574 | * Description: |
@@ -2579,9 +2579,10 @@ static void free_rx_buffers(struct s2io_nic *sp) | |||
2579 | * 0 on success and 1 if there are No Rx packets to be processed. | 2579 | * 0 on success and 1 if there are No Rx packets to be processed. |
2580 | */ | 2580 | */ |
2581 | 2581 | ||
2582 | static int s2io_poll(struct net_device *dev, int *budget) | 2582 | static int s2io_poll(struct napi_struct *napi, int budget) |
2583 | { | 2583 | { |
2584 | struct s2io_nic *nic = dev->priv; | 2584 | struct s2io_nic *nic = container_of(napi, struct s2io_nic, napi); |
2585 | struct net_device *dev = nic->dev; | ||
2585 | int pkt_cnt = 0, org_pkts_to_process; | 2586 | int pkt_cnt = 0, org_pkts_to_process; |
2586 | struct mac_info *mac_control; | 2587 | struct mac_info *mac_control; |
2587 | struct config_param *config; | 2588 | struct config_param *config; |
@@ -2592,9 +2593,7 @@ static int s2io_poll(struct net_device *dev, int *budget) | |||
2592 | mac_control = &nic->mac_control; | 2593 | mac_control = &nic->mac_control; |
2593 | config = &nic->config; | 2594 | config = &nic->config; |
2594 | 2595 | ||
2595 | nic->pkts_to_process = *budget; | 2596 | nic->pkts_to_process = budget; |
2596 | if (nic->pkts_to_process > dev->quota) | ||
2597 | nic->pkts_to_process = dev->quota; | ||
2598 | org_pkts_to_process = nic->pkts_to_process; | 2597 | org_pkts_to_process = nic->pkts_to_process; |
2599 | 2598 | ||
2600 | writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int); | 2599 | writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int); |
@@ -2608,12 +2607,8 @@ static int s2io_poll(struct net_device *dev, int *budget) | |||
2608 | goto no_rx; | 2607 | goto no_rx; |
2609 | } | 2608 | } |
2610 | } | 2609 | } |
2611 | if (!pkt_cnt) | ||
2612 | pkt_cnt = 1; | ||
2613 | 2610 | ||
2614 | dev->quota -= pkt_cnt; | 2611 | netif_rx_complete(dev, napi); |
2615 | *budget -= pkt_cnt; | ||
2616 | netif_rx_complete(dev); | ||
2617 | 2612 | ||
2618 | for (i = 0; i < config->rx_ring_num; i++) { | 2613 | for (i = 0; i < config->rx_ring_num; i++) { |
2619 | if (fill_rx_buffers(nic, i) == -ENOMEM) { | 2614 | if (fill_rx_buffers(nic, i) == -ENOMEM) { |
@@ -2626,12 +2621,9 @@ static int s2io_poll(struct net_device *dev, int *budget) | |||
2626 | writeq(0x0, &bar0->rx_traffic_mask); | 2621 | writeq(0x0, &bar0->rx_traffic_mask); |
2627 | readl(&bar0->rx_traffic_mask); | 2622 | readl(&bar0->rx_traffic_mask); |
2628 | atomic_dec(&nic->isr_cnt); | 2623 | atomic_dec(&nic->isr_cnt); |
2629 | return 0; | 2624 | return pkt_cnt; |
2630 | 2625 | ||
2631 | no_rx: | 2626 | no_rx: |
2632 | dev->quota -= pkt_cnt; | ||
2633 | *budget -= pkt_cnt; | ||
2634 | |||
2635 | for (i = 0; i < config->rx_ring_num; i++) { | 2627 | for (i = 0; i < config->rx_ring_num; i++) { |
2636 | if (fill_rx_buffers(nic, i) == -ENOMEM) { | 2628 | if (fill_rx_buffers(nic, i) == -ENOMEM) { |
2637 | DBG_PRINT(INFO_DBG, "%s:Out of memory", dev->name); | 2629 | DBG_PRINT(INFO_DBG, "%s:Out of memory", dev->name); |
@@ -2640,7 +2632,7 @@ no_rx: | |||
2640 | } | 2632 | } |
2641 | } | 2633 | } |
2642 | atomic_dec(&nic->isr_cnt); | 2634 | atomic_dec(&nic->isr_cnt); |
2643 | return 1; | 2635 | return pkt_cnt; |
2644 | } | 2636 | } |
2645 | 2637 | ||
2646 | #ifdef CONFIG_NET_POLL_CONTROLLER | 2638 | #ifdef CONFIG_NET_POLL_CONTROLLER |
@@ -3809,6 +3801,8 @@ static int s2io_open(struct net_device *dev) | |||
3809 | netif_carrier_off(dev); | 3801 | netif_carrier_off(dev); |
3810 | sp->last_link_state = 0; | 3802 | sp->last_link_state = 0; |
3811 | 3803 | ||
3804 | napi_enable(&sp->napi); | ||
3805 | |||
3812 | /* Initialize H/W and enable interrupts */ | 3806 | /* Initialize H/W and enable interrupts */ |
3813 | err = s2io_card_up(sp); | 3807 | err = s2io_card_up(sp); |
3814 | if (err) { | 3808 | if (err) { |
@@ -3828,6 +3822,7 @@ static int s2io_open(struct net_device *dev) | |||
3828 | return 0; | 3822 | return 0; |
3829 | 3823 | ||
3830 | hw_init_failed: | 3824 | hw_init_failed: |
3825 | napi_disable(&sp->napi); | ||
3831 | if (sp->intr_type == MSI_X) { | 3826 | if (sp->intr_type == MSI_X) { |
3832 | if (sp->entries) { | 3827 | if (sp->entries) { |
3833 | kfree(sp->entries); | 3828 | kfree(sp->entries); |
@@ -3861,6 +3856,7 @@ static int s2io_close(struct net_device *dev) | |||
3861 | struct s2io_nic *sp = dev->priv; | 3856 | struct s2io_nic *sp = dev->priv; |
3862 | 3857 | ||
3863 | netif_stop_queue(dev); | 3858 | netif_stop_queue(dev); |
3859 | napi_disable(&sp->napi); | ||
3864 | /* Reset card, kill tasklet and free Tx and Rx buffers. */ | 3860 | /* Reset card, kill tasklet and free Tx and Rx buffers. */ |
3865 | s2io_card_down(sp); | 3861 | s2io_card_down(sp); |
3866 | 3862 | ||
@@ -4232,8 +4228,8 @@ static irqreturn_t s2io_isr(int irq, void *dev_id) | |||
4232 | 4228 | ||
4233 | if (napi) { | 4229 | if (napi) { |
4234 | if (reason & GEN_INTR_RXTRAFFIC) { | 4230 | if (reason & GEN_INTR_RXTRAFFIC) { |
4235 | if ( likely ( netif_rx_schedule_prep(dev)) ) { | 4231 | if (likely (netif_rx_schedule_prep(dev, &sp->napi))) { |
4236 | __netif_rx_schedule(dev); | 4232 | __netif_rx_schedule(dev, &sp->napi); |
4237 | writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_mask); | 4233 | writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_mask); |
4238 | } | 4234 | } |
4239 | else | 4235 | else |
@@ -7215,8 +7211,7 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre) | |||
7215 | * will use eth_mac_addr() for dev->set_mac_address | 7211 | * will use eth_mac_addr() for dev->set_mac_address |
7216 | * mac address will be set every time dev->open() is called | 7212 | * mac address will be set every time dev->open() is called |
7217 | */ | 7213 | */ |
7218 | dev->poll = s2io_poll; | 7214 | netif_napi_add(dev, &sp->napi, s2io_poll, 32); |
7219 | dev->weight = 32; | ||
7220 | 7215 | ||
7221 | #ifdef CONFIG_NET_POLL_CONTROLLER | 7216 | #ifdef CONFIG_NET_POLL_CONTROLLER |
7222 | dev->poll_controller = s2io_netpoll; | 7217 | dev->poll_controller = s2io_netpoll; |
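For contrast, the contract this series removes.  Every driver used to
open-code some variant of the following (a sketch of the pre-patch
shape, not any one driver verbatim):

    static int foo_poll_old(struct net_device *dev, int *budget)
    {
            int quota = min(dev->quota, *budget);   /* two limits ...   */
            int done = foo_clean_rx_ring(netdev_priv(dev), quota);

            *budget -= done;                        /* ... both updated */
            dev->quota -= done;                     /* by hand          */

            if (done < quota) {
                    netif_rx_complete(dev);
                    foo_unmask_rx_irq(netdev_priv(dev));
                    return 0;                       /* leave poll list  */
            }
            return 1;                               /* poll me again    */
    }

Artifacts of that arithmetic, such as s2io's removed
"if (!pkt_cnt) pkt_cnt = 1;", simply fall away with it.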
diff --git a/drivers/net/s2io.h b/drivers/net/s2io.h index 92983ee7df8c..420fefb99188 100644 --- a/drivers/net/s2io.h +++ b/drivers/net/s2io.h | |||
@@ -786,6 +786,7 @@ struct s2io_nic { | |||
786 | */ | 786 | */ |
787 | int pkts_to_process; | 787 | int pkts_to_process; |
788 | struct net_device *dev; | 788 | struct net_device *dev; |
789 | struct napi_struct napi; | ||
789 | struct mac_info mac_control; | 790 | struct mac_info mac_control; |
790 | struct config_param config; | 791 | struct config_param config; |
791 | struct pci_dev *pdev; | 792 | struct pci_dev *pdev; |
@@ -1019,7 +1020,7 @@ static void s2io_set_multicast(struct net_device *dev); | |||
1019 | static int rx_osm_handler(struct ring_info *ring_data, struct RxD_t * rxdp); | 1020 | static int rx_osm_handler(struct ring_info *ring_data, struct RxD_t * rxdp); |
1020 | static void s2io_link(struct s2io_nic * sp, int link); | 1021 | static void s2io_link(struct s2io_nic * sp, int link); |
1021 | static void s2io_reset(struct s2io_nic * sp); | 1022 | static void s2io_reset(struct s2io_nic * sp); |
1022 | static int s2io_poll(struct net_device *dev, int *budget); | 1023 | static int s2io_poll(struct napi_struct *napi, int budget); |
1023 | static void s2io_init_pci(struct s2io_nic * sp); | 1024 | static void s2io_init_pci(struct s2io_nic * sp); |
1024 | static int s2io_set_mac_addr(struct net_device *dev, u8 * addr); | 1025 | static int s2io_set_mac_addr(struct net_device *dev, u8 * addr); |
1025 | static void s2io_alarm_handle(unsigned long data); | 1026 | static void s2io_alarm_handle(unsigned long data); |
diff --git a/drivers/net/sb1250-mac.c b/drivers/net/sb1250-mac.c index e7fdcf15b5a7..53845ebb649f 100644 --- a/drivers/net/sb1250-mac.c +++ b/drivers/net/sb1250-mac.c | |||
@@ -238,6 +238,7 @@ struct sbmac_softc { | |||
238 | */ | 238 | */ |
239 | 239 | ||
240 | struct net_device *sbm_dev; /* pointer to linux device */ | 240 | struct net_device *sbm_dev; /* pointer to linux device */ |
241 | struct napi_struct napi; | ||
241 | spinlock_t sbm_lock; /* spin lock */ | 242 | spinlock_t sbm_lock; /* spin lock */ |
242 | struct timer_list sbm_timer; /* for monitoring MII */ | 243 | struct timer_list sbm_timer; /* for monitoring MII */ |
243 | struct net_device_stats sbm_stats; | 244 | struct net_device_stats sbm_stats; |
@@ -320,7 +321,7 @@ static struct net_device_stats *sbmac_get_stats(struct net_device *dev); | |||
320 | static void sbmac_set_rx_mode(struct net_device *dev); | 321 | static void sbmac_set_rx_mode(struct net_device *dev); |
321 | static int sbmac_mii_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); | 322 | static int sbmac_mii_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); |
322 | static int sbmac_close(struct net_device *dev); | 323 | static int sbmac_close(struct net_device *dev); |
323 | static int sbmac_poll(struct net_device *poll_dev, int *budget); | 324 | static int sbmac_poll(struct napi_struct *napi, int budget); |
324 | 325 | ||
325 | static int sbmac_mii_poll(struct sbmac_softc *s,int noisy); | 326 | static int sbmac_mii_poll(struct sbmac_softc *s,int noisy); |
326 | static int sbmac_mii_probe(struct net_device *dev); | 327 | static int sbmac_mii_probe(struct net_device *dev); |
@@ -2154,20 +2155,13 @@ static irqreturn_t sbmac_intr(int irq,void *dev_instance) | |||
2154 | * Transmits on channel 0 | 2155 | * Transmits on channel 0 |
2155 | */ | 2156 | */ |
2156 | 2157 | ||
2157 | if (isr & (M_MAC_INT_CHANNEL << S_MAC_TX_CH0)) { | 2158 | if (isr & (M_MAC_INT_CHANNEL << S_MAC_TX_CH0)) |
2158 | sbdma_tx_process(sc,&(sc->sbm_txdma), 0); | 2159 | sbdma_tx_process(sc,&(sc->sbm_txdma), 0); |
2159 | #ifdef CONFIG_NETPOLL_TRAP | ||
2160 | if (netpoll_trap()) { | ||
2161 | if (test_and_clear_bit(__LINK_STATE_XOFF, &dev->state)) | ||
2162 | __netif_schedule(dev); | ||
2163 | } | ||
2164 | #endif | ||
2165 | } | ||
2166 | 2160 | ||
2167 | if (isr & (M_MAC_INT_CHANNEL << S_MAC_RX_CH0)) { | 2161 | if (isr & (M_MAC_INT_CHANNEL << S_MAC_RX_CH0)) { |
2168 | if (netif_rx_schedule_prep(dev)) { | 2162 | if (netif_rx_schedule_prep(dev, &sc->napi)) { |
2169 | __raw_writeq(0, sc->sbm_imr); | 2163 | __raw_writeq(0, sc->sbm_imr); |
2170 | __netif_rx_schedule(dev); | 2164 | __netif_rx_schedule(dev, &sc->napi); |
2171 | /* Depend on the exit from poll to reenable intr */ | 2165 | /* Depend on the exit from poll to reenable intr */ |
2172 | } | 2166 | } |
2173 | else { | 2167 | else { |
@@ -2470,8 +2464,8 @@ static int sbmac_init(struct net_device *dev, int idx) | |||
2470 | dev->do_ioctl = sbmac_mii_ioctl; | 2464 | dev->do_ioctl = sbmac_mii_ioctl; |
2471 | dev->tx_timeout = sbmac_tx_timeout; | 2465 | dev->tx_timeout = sbmac_tx_timeout; |
2472 | dev->watchdog_timeo = TX_TIMEOUT; | 2466 | dev->watchdog_timeo = TX_TIMEOUT; |
2473 | dev->poll = sbmac_poll; | 2467 | |
2474 | dev->weight = 16; | 2468 | netif_napi_add(dev, &sc->napi, sbmac_poll, 16); |
2475 | 2469 | ||
2476 | dev->change_mtu = sb1250_change_mtu; | 2470 | dev->change_mtu = sb1250_change_mtu; |
2477 | #ifdef CONFIG_NET_POLL_CONTROLLER | 2471 | #ifdef CONFIG_NET_POLL_CONTROLLER |
@@ -2537,6 +2531,8 @@ static int sbmac_open(struct net_device *dev) | |||
2537 | return -EINVAL; | 2531 | return -EINVAL; |
2538 | } | 2532 | } |
2539 | 2533 | ||
2534 | napi_enable(&sc->napi); | ||
2535 | |||
2540 | /* | 2536 | /* |
2541 | * Configure default speed | 2537 | * Configure default speed |
2542 | */ | 2538 | */ |
@@ -2850,6 +2846,8 @@ static int sbmac_close(struct net_device *dev) | |||
2850 | unsigned long flags; | 2846 | unsigned long flags; |
2851 | int irq; | 2847 | int irq; |
2852 | 2848 | ||
2849 | napi_disable(&sc->napi); | ||
2850 | |||
2853 | sbmac_set_channel_state(sc,sbmac_state_off); | 2851 | sbmac_set_channel_state(sc,sbmac_state_off); |
2854 | 2852 | ||
2855 | del_timer_sync(&sc->sbm_timer); | 2853 | del_timer_sync(&sc->sbm_timer); |
@@ -2874,26 +2872,17 @@ static int sbmac_close(struct net_device *dev) | |||
2874 | return 0; | 2872 | return 0; |
2875 | } | 2873 | } |
2876 | 2874 | ||
2877 | static int sbmac_poll(struct net_device *dev, int *budget) | 2875 | static int sbmac_poll(struct napi_struct *napi, int budget) |
2878 | { | 2876 | { |
2879 | int work_to_do; | 2877 | struct sbmac_softc *sc = container_of(napi, struct sbmac_softc, napi); |
2878 | struct net_device *dev = sc->sbm_dev; | ||
2880 | int work_done; | 2879 | int work_done; |
2881 | struct sbmac_softc *sc = netdev_priv(dev); | ||
2882 | |||
2883 | work_to_do = min(*budget, dev->quota); | ||
2884 | work_done = sbdma_rx_process(sc, &(sc->sbm_rxdma), work_to_do, 1); | ||
2885 | |||
2886 | if (work_done > work_to_do) | ||
2887 | printk(KERN_ERR "%s exceeded work_to_do budget=%d quota=%d work-done=%d\n", | ||
2888 | sc->sbm_dev->name, *budget, dev->quota, work_done); | ||
2889 | 2880 | ||
2881 | work_done = sbdma_rx_process(sc, &(sc->sbm_rxdma), budget, 1); | ||
2890 | sbdma_tx_process(sc, &(sc->sbm_txdma), 1); | 2882 | sbdma_tx_process(sc, &(sc->sbm_txdma), 1); |
2891 | 2883 | ||
2892 | *budget -= work_done; | 2884 | if (work_done < budget) { |
2893 | dev->quota -= work_done; | 2885 | netif_rx_complete(dev, napi); |
2894 | |||
2895 | if (work_done < work_to_do) { | ||
2896 | netif_rx_complete(dev); | ||
2897 | 2886 | ||
2898 | #ifdef CONFIG_SBMAC_COALESCE | 2887 | #ifdef CONFIG_SBMAC_COALESCE |
2899 | __raw_writeq(((M_MAC_INT_EOP_COUNT | M_MAC_INT_EOP_TIMER) << S_MAC_TX_CH0) | | 2888 | __raw_writeq(((M_MAC_INT_EOP_COUNT | M_MAC_INT_EOP_TIMER) << S_MAC_TX_CH0) | |
@@ -2905,7 +2894,7 @@ static int sbmac_poll(struct net_device *dev, int *budget) | |||
2905 | #endif | 2894 | #endif |
2906 | } | 2895 | } |
2907 | 2896 | ||
2908 | return (work_done >= work_to_do); | 2897 | return work_done; |
2909 | } | 2898 | } |
2910 | 2899 | ||
2911 | #if defined(SBMAC_ETH0_HWADDR) || defined(SBMAC_ETH1_HWADDR) || defined(SBMAC_ETH2_HWADDR) || defined(SBMAC_ETH3_HWADDR) | 2900 | #if defined(SBMAC_ETH0_HWADDR) || defined(SBMAC_ETH1_HWADDR) || defined(SBMAC_ETH2_HWADDR) || defined(SBMAC_ETH3_HWADDR) |
diff --git a/drivers/net/sis190.c b/drivers/net/sis190.c index d470b19c0810..038ccfbafdd1 100644 --- a/drivers/net/sis190.c +++ b/drivers/net/sis190.c | |||
@@ -47,24 +47,13 @@ | |||
47 | #define PHY_ID_ANY 0x1f | 47 | #define PHY_ID_ANY 0x1f |
48 | #define MII_REG_ANY 0x1f | 48 | #define MII_REG_ANY 0x1f |
49 | 49 | ||
50 | #ifdef CONFIG_SIS190_NAPI | 50 | #define DRV_VERSION "1.2" |
51 | #define NAPI_SUFFIX "-NAPI" | ||
52 | #else | ||
53 | #define NAPI_SUFFIX "" | ||
54 | #endif | ||
55 | |||
56 | #define DRV_VERSION "1.2" NAPI_SUFFIX | ||
57 | #define DRV_NAME "sis190" | 51 | #define DRV_NAME "sis190" |
58 | #define SIS190_DRIVER_NAME DRV_NAME " Gigabit Ethernet driver " DRV_VERSION | 52 | #define SIS190_DRIVER_NAME DRV_NAME " Gigabit Ethernet driver " DRV_VERSION |
59 | #define PFX DRV_NAME ": " | 53 | #define PFX DRV_NAME ": " |
60 | 54 | ||
61 | #ifdef CONFIG_SIS190_NAPI | ||
62 | #define sis190_rx_skb netif_receive_skb | ||
63 | #define sis190_rx_quota(count, quota) min(count, quota) | ||
64 | #else | ||
65 | #define sis190_rx_skb netif_rx | 55 | #define sis190_rx_skb netif_rx |
66 | #define sis190_rx_quota(count, quota) count | 56 | #define sis190_rx_quota(count, quota) count |
67 | #endif | ||
68 | 57 | ||
69 | #define MAC_ADDR_LEN 6 | 58 | #define MAC_ADDR_LEN 6 |
70 | 59 | ||
@@ -1115,10 +1104,8 @@ static void sis190_down(struct net_device *dev) | |||
1115 | 1104 | ||
1116 | synchronize_irq(dev->irq); | 1105 | synchronize_irq(dev->irq); |
1117 | 1106 | ||
1118 | if (!poll_locked) { | 1107 | if (!poll_locked) |
1119 | netif_poll_disable(dev); | ||
1120 | poll_locked++; | 1108 | poll_locked++; |
1121 | } | ||
1122 | 1109 | ||
1123 | synchronize_sched(); | 1110 | synchronize_sched(); |
1124 | 1111 | ||
@@ -1137,8 +1124,6 @@ static int sis190_close(struct net_device *dev) | |||
1137 | 1124 | ||
1138 | free_irq(dev->irq, dev); | 1125 | free_irq(dev->irq, dev); |
1139 | 1126 | ||
1140 | netif_poll_enable(dev); | ||
1141 | |||
1142 | pci_free_consistent(pdev, TX_RING_BYTES, tp->TxDescRing, tp->tx_dma); | 1127 | pci_free_consistent(pdev, TX_RING_BYTES, tp->TxDescRing, tp->tx_dma); |
1143 | pci_free_consistent(pdev, RX_RING_BYTES, tp->RxDescRing, tp->rx_dma); | 1128 | pci_free_consistent(pdev, RX_RING_BYTES, tp->RxDescRing, tp->rx_dma); |
1144 | 1129 | ||
diff --git a/drivers/net/skge.c b/drivers/net/skge.c index e3d8520209b8..0bf46ed4e684 100644 --- a/drivers/net/skge.c +++ b/drivers/net/skge.c | |||
@@ -2528,7 +2528,7 @@ static int skge_up(struct net_device *dev) | |||
2528 | skge_write32(hw, B0_IMSK, hw->intr_mask); | 2528 | skge_write32(hw, B0_IMSK, hw->intr_mask); |
2529 | spin_unlock_irq(&hw->hw_lock); | 2529 | spin_unlock_irq(&hw->hw_lock); |
2530 | 2530 | ||
2531 | netif_poll_enable(dev); | 2531 | napi_enable(&skge->napi); |
2532 | return 0; | 2532 | return 0; |
2533 | 2533 | ||
2534 | free_rx_ring: | 2534 | free_rx_ring: |
@@ -2558,7 +2558,7 @@ static int skge_down(struct net_device *dev) | |||
2558 | if (hw->chip_id == CHIP_ID_GENESIS && hw->phy_type == SK_PHY_XMAC) | 2558 | if (hw->chip_id == CHIP_ID_GENESIS && hw->phy_type == SK_PHY_XMAC) |
2559 | del_timer_sync(&skge->link_timer); | 2559 | del_timer_sync(&skge->link_timer); |
2560 | 2560 | ||
2561 | netif_poll_disable(dev); | 2561 | napi_disable(&skge->napi); |
2562 | netif_carrier_off(dev); | 2562 | netif_carrier_off(dev); |
2563 | 2563 | ||
2564 | spin_lock_irq(&hw->hw_lock); | 2564 | spin_lock_irq(&hw->hw_lock); |
@@ -3044,14 +3044,13 @@ static void skge_tx_done(struct net_device *dev) | |||
3044 | } | 3044 | } |
3045 | } | 3045 | } |
3046 | 3046 | ||
3047 | static int skge_poll(struct net_device *dev, int *budget) | 3047 | static int skge_poll(struct napi_struct *napi, int to_do) |
3048 | { | 3048 | { |
3049 | struct skge_port *skge = netdev_priv(dev); | 3049 | struct skge_port *skge = container_of(napi, struct skge_port, napi); |
3050 | struct net_device *dev = skge->netdev; | ||
3050 | struct skge_hw *hw = skge->hw; | 3051 | struct skge_hw *hw = skge->hw; |
3051 | struct skge_ring *ring = &skge->rx_ring; | 3052 | struct skge_ring *ring = &skge->rx_ring; |
3052 | struct skge_element *e; | 3053 | struct skge_element *e; |
3053 | unsigned long flags; | ||
3054 | int to_do = min(dev->quota, *budget); | ||
3055 | int work_done = 0; | 3054 | int work_done = 0; |
3056 | 3055 | ||
3057 | skge_tx_done(dev); | 3056 | skge_tx_done(dev); |
@@ -3082,20 +3081,16 @@ static int skge_poll(struct net_device *dev, int *budget) | |||
3082 | wmb(); | 3081 | wmb(); |
3083 | skge_write8(hw, Q_ADDR(rxqaddr[skge->port], Q_CSR), CSR_START); | 3082 | skge_write8(hw, Q_ADDR(rxqaddr[skge->port], Q_CSR), CSR_START); |
3084 | 3083 | ||
3085 | *budget -= work_done; | 3084 | if (work_done < to_do) { |
3086 | dev->quota -= work_done; | 3085 | spin_lock_irq(&hw->hw_lock); |
3087 | 3086 | __netif_rx_complete(dev, napi); | |
3088 | if (work_done >= to_do) | 3087 | hw->intr_mask |= napimask[skge->port]; |
3089 | return 1; /* not done */ | 3088 | skge_write32(hw, B0_IMSK, hw->intr_mask); |
3090 | 3089 | skge_read32(hw, B0_IMSK); | |
3091 | spin_lock_irqsave(&hw->hw_lock, flags); | 3090 | spin_unlock_irq(&hw->hw_lock); |
3092 | __netif_rx_complete(dev); | 3091 | } |
3093 | hw->intr_mask |= napimask[skge->port]; | ||
3094 | skge_write32(hw, B0_IMSK, hw->intr_mask); | ||
3095 | skge_read32(hw, B0_IMSK); | ||
3096 | spin_unlock_irqrestore(&hw->hw_lock, flags); | ||
3097 | 3092 | ||
3098 | return 0; | 3093 | return work_done; |
3099 | } | 3094 | } |
3100 | 3095 | ||
3101 | /* Parity errors seem to happen when Genesis is connected to a switch | 3096 | /* Parity errors seem to happen when Genesis is connected to a switch |
@@ -3252,8 +3247,9 @@ static irqreturn_t skge_intr(int irq, void *dev_id) | |||
3252 | } | 3247 | } |
3253 | 3248 | ||
3254 | if (status & (IS_XA1_F|IS_R1_F)) { | 3249 | if (status & (IS_XA1_F|IS_R1_F)) { |
3250 | struct skge_port *skge = netdev_priv(hw->dev[0]); | ||
3255 | hw->intr_mask &= ~(IS_XA1_F|IS_R1_F); | 3251 | hw->intr_mask &= ~(IS_XA1_F|IS_R1_F); |
3256 | netif_rx_schedule(hw->dev[0]); | 3252 | netif_rx_schedule(hw->dev[0], &skge->napi); |
3257 | } | 3253 | } |
3258 | 3254 | ||
3259 | if (status & IS_PA_TO_TX1) | 3255 | if (status & IS_PA_TO_TX1) |
@@ -3271,13 +3267,14 @@ static irqreturn_t skge_intr(int irq, void *dev_id) | |||
3271 | skge_mac_intr(hw, 0); | 3267 | skge_mac_intr(hw, 0); |
3272 | 3268 | ||
3273 | if (hw->dev[1]) { | 3269 | if (hw->dev[1]) { |
3270 | struct skge_port *skge = netdev_priv(hw->dev[1]); | ||
3271 | |||
3274 | if (status & (IS_XA2_F|IS_R2_F)) { | 3272 | if (status & (IS_XA2_F|IS_R2_F)) { |
3275 | hw->intr_mask &= ~(IS_XA2_F|IS_R2_F); | 3273 | hw->intr_mask &= ~(IS_XA2_F|IS_R2_F); |
3276 | netif_rx_schedule(hw->dev[1]); | 3274 | netif_rx_schedule(hw->dev[1], &skge->napi); |
3277 | } | 3275 | } |
3278 | 3276 | ||
3279 | if (status & IS_PA_TO_RX2) { | 3277 | if (status & IS_PA_TO_RX2) { |
3280 | struct skge_port *skge = netdev_priv(hw->dev[1]); | ||
3281 | ++skge->net_stats.rx_over_errors; | 3278 | ++skge->net_stats.rx_over_errors; |
3282 | skge_write16(hw, B3_PA_CTRL, PA_CLR_TO_RX2); | 3279 | skge_write16(hw, B3_PA_CTRL, PA_CLR_TO_RX2); |
3283 | } | 3280 | } |
@@ -3569,8 +3566,6 @@ static struct net_device *skge_devinit(struct skge_hw *hw, int port, | |||
3569 | SET_ETHTOOL_OPS(dev, &skge_ethtool_ops); | 3566 | SET_ETHTOOL_OPS(dev, &skge_ethtool_ops); |
3570 | dev->tx_timeout = skge_tx_timeout; | 3567 | dev->tx_timeout = skge_tx_timeout; |
3571 | dev->watchdog_timeo = TX_WATCHDOG; | 3568 | dev->watchdog_timeo = TX_WATCHDOG; |
3572 | dev->poll = skge_poll; | ||
3573 | dev->weight = NAPI_WEIGHT; | ||
3574 | #ifdef CONFIG_NET_POLL_CONTROLLER | 3569 | #ifdef CONFIG_NET_POLL_CONTROLLER |
3575 | dev->poll_controller = skge_netpoll; | 3570 | dev->poll_controller = skge_netpoll; |
3576 | #endif | 3571 | #endif |
@@ -3580,6 +3575,7 @@ static struct net_device *skge_devinit(struct skge_hw *hw, int port, | |||
3580 | dev->features |= NETIF_F_HIGHDMA; | 3575 | dev->features |= NETIF_F_HIGHDMA; |
3581 | 3576 | ||
3582 | skge = netdev_priv(dev); | 3577 | skge = netdev_priv(dev); |
3578 | netif_napi_add(dev, &skge->napi, skge_poll, NAPI_WEIGHT); | ||
3583 | skge->netdev = dev; | 3579 | skge->netdev = dev; |
3584 | skge->hw = hw; | 3580 | skge->hw = hw; |
3585 | skge->msg_enable = netif_msg_init(debug, default_msg); | 3581 | skge->msg_enable = netif_msg_init(debug, default_msg); |
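skge completes NAPI and re-arms its shared interrupt mask under a
single lock, using the double-underscore variant of completion since
interrupts are already off at that point.  The ordering, abstracted
from the hunk above:

    spin_lock_irq(&hw->hw_lock);
    __netif_rx_complete(dev, napi);           /* irqs-off variant  */
    hw->intr_mask |= napimask[skge->port];    /* re-arm this port  */
    skge_write32(hw, B0_IMSK, hw->intr_mask);
    skge_read32(hw, B0_IMSK);                 /* flush posted write */
    spin_unlock_irq(&hw->hw_lock);

The irqsave/irqrestore pair could be dropped, presumably because the
poll routine now runs from softirq context with interrupts enabled.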
diff --git a/drivers/net/skge.h b/drivers/net/skge.h index edd71468220c..dd0fd45c7155 100644 --- a/drivers/net/skge.h +++ b/drivers/net/skge.h | |||
@@ -2448,6 +2448,7 @@ enum pause_status { | |||
2448 | struct skge_port { | 2448 | struct skge_port { |
2449 | struct skge_hw *hw; | 2449 | struct skge_hw *hw; |
2450 | struct net_device *netdev; | 2450 | struct net_device *netdev; |
2451 | struct napi_struct napi; | ||
2451 | int port; | 2452 | int port; |
2452 | u32 msg_enable; | 2453 | u32 msg_enable; |
2453 | 2454 | ||
diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c index ea117fc3d5e3..a0d75b0f3798 100644 --- a/drivers/net/sky2.c +++ b/drivers/net/sky2.c | |||
@@ -1130,7 +1130,7 @@ static void sky2_vlan_rx_register(struct net_device *dev, struct vlan_group *grp | |||
1130 | u16 port = sky2->port; | 1130 | u16 port = sky2->port; |
1131 | 1131 | ||
1132 | netif_tx_lock_bh(dev); | 1132 | netif_tx_lock_bh(dev); |
1133 | netif_poll_disable(sky2->hw->dev[0]); | 1133 | napi_disable(&hw->napi); |
1134 | 1134 | ||
1135 | sky2->vlgrp = grp; | 1135 | sky2->vlgrp = grp; |
1136 | if (grp) { | 1136 | if (grp) { |
@@ -1145,7 +1145,7 @@ static void sky2_vlan_rx_register(struct net_device *dev, struct vlan_group *grp | |||
1145 | TX_VLAN_TAG_OFF); | 1145 | TX_VLAN_TAG_OFF); |
1146 | } | 1146 | } |
1147 | 1147 | ||
1148 | netif_poll_enable(sky2->hw->dev[0]); | 1148 | napi_enable(&hw->napi); |
1149 | netif_tx_unlock_bh(dev); | 1149 | netif_tx_unlock_bh(dev); |
1150 | } | 1150 | } |
1151 | #endif | 1151 | #endif |
@@ -1385,9 +1385,13 @@ static int sky2_up(struct net_device *dev) | |||
1385 | sky2_prefetch_init(hw, txqaddr[port], sky2->tx_le_map, | 1385 | sky2_prefetch_init(hw, txqaddr[port], sky2->tx_le_map, |
1386 | TX_RING_SIZE - 1); | 1386 | TX_RING_SIZE - 1); |
1387 | 1387 | ||
1388 | napi_enable(&hw->napi); | ||
1389 | |||
1388 | err = sky2_rx_start(sky2); | 1390 | err = sky2_rx_start(sky2); |
1389 | if (err) | 1391 | if (err) { |
1392 | napi_disable(&hw->napi); | ||
1390 | goto err_out; | 1393 | goto err_out; |
1394 | } | ||
1391 | 1395 | ||
1392 | /* Enable interrupts from phy/mac for port */ | 1396 | /* Enable interrupts from phy/mac for port */ |
1393 | imask = sky2_read32(hw, B0_IMSK); | 1397 | imask = sky2_read32(hw, B0_IMSK); |
@@ -1676,6 +1680,8 @@ static int sky2_down(struct net_device *dev) | |||
1676 | /* Stop more packets from being queued */ | 1680 | /* Stop more packets from being queued */ |
1677 | netif_stop_queue(dev); | 1681 | netif_stop_queue(dev); |
1678 | 1682 | ||
1683 | napi_disable(&hw->napi); | ||
1684 | |||
1679 | /* Disable port IRQ */ | 1685 | /* Disable port IRQ */ |
1680 | imask = sky2_read32(hw, B0_IMSK); | 1686 | imask = sky2_read32(hw, B0_IMSK); |
1681 | imask &= ~portirq_msk[port]; | 1687 | imask &= ~portirq_msk[port]; |
@@ -2016,7 +2022,7 @@ static int sky2_change_mtu(struct net_device *dev, int new_mtu) | |||
2016 | 2022 | ||
2017 | dev->trans_start = jiffies; /* prevent tx timeout */ | 2023 | dev->trans_start = jiffies; /* prevent tx timeout */ |
2018 | netif_stop_queue(dev); | 2024 | netif_stop_queue(dev); |
2019 | netif_poll_disable(hw->dev[0]); | 2025 | napi_disable(&hw->napi); |
2020 | 2026 | ||
2021 | synchronize_irq(hw->pdev->irq); | 2027 | synchronize_irq(hw->pdev->irq); |
2022 | 2028 | ||
@@ -2043,12 +2049,16 @@ static int sky2_change_mtu(struct net_device *dev, int new_mtu) | |||
2043 | err = sky2_rx_start(sky2); | 2049 | err = sky2_rx_start(sky2); |
2044 | sky2_write32(hw, B0_IMSK, imask); | 2050 | sky2_write32(hw, B0_IMSK, imask); |
2045 | 2051 | ||
2052 | /* Unconditionally re-enable NAPI: even if we end up | ||
2053 | * calling dev_close(), that path does a napi_disable(). | ||
2054 | */ | ||
2055 | napi_enable(&hw->napi); | ||
2056 | |||
2046 | if (err) | 2057 | if (err) |
2047 | dev_close(dev); | 2058 | dev_close(dev); |
2048 | else { | 2059 | else { |
2049 | gma_write16(hw, port, GM_GP_CTRL, ctl); | 2060 | gma_write16(hw, port, GM_GP_CTRL, ctl); |
2050 | 2061 | ||
2051 | netif_poll_enable(hw->dev[0]); | ||
2052 | netif_wake_queue(dev); | 2062 | netif_wake_queue(dev); |
2053 | } | 2063 | } |
2054 | 2064 | ||
@@ -2544,18 +2554,15 @@ static int sky2_rx_hung(struct net_device *dev) | |||
2544 | static void sky2_watchdog(unsigned long arg) | 2554 | static void sky2_watchdog(unsigned long arg) |
2545 | { | 2555 | { |
2546 | struct sky2_hw *hw = (struct sky2_hw *) arg; | 2556 | struct sky2_hw *hw = (struct sky2_hw *) arg; |
2547 | struct net_device *dev; | ||
2548 | 2557 | ||
2549 | /* Check for lost IRQ once a second */ | 2558 | /* Check for lost IRQ once a second */ |
2550 | if (sky2_read32(hw, B0_ISRC)) { | 2559 | if (sky2_read32(hw, B0_ISRC)) { |
2551 | dev = hw->dev[0]; | 2560 | napi_schedule(&hw->napi); |
2552 | if (__netif_rx_schedule_prep(dev)) | ||
2553 | __netif_rx_schedule(dev); | ||
2554 | } else { | 2561 | } else { |
2555 | int i, active = 0; | 2562 | int i, active = 0; |
2556 | 2563 | ||
2557 | for (i = 0; i < hw->ports; i++) { | 2564 | for (i = 0; i < hw->ports; i++) { |
2558 | dev = hw->dev[i]; | 2565 | struct net_device *dev = hw->dev[i]; |
2559 | if (!netif_running(dev)) | 2566 | if (!netif_running(dev)) |
2560 | continue; | 2567 | continue; |
2561 | ++active; | 2568 | ++active; |
@@ -2605,11 +2612,11 @@ static void sky2_err_intr(struct sky2_hw *hw, u32 status) | |||
2605 | sky2_le_error(hw, 1, Q_XA2, TX_RING_SIZE); | 2612 | sky2_le_error(hw, 1, Q_XA2, TX_RING_SIZE); |
2606 | } | 2613 | } |
2607 | 2614 | ||
2608 | static int sky2_poll(struct net_device *dev0, int *budget) | 2615 | static int sky2_poll(struct napi_struct *napi, int work_limit) |
2609 | { | 2616 | { |
2610 | struct sky2_hw *hw = ((struct sky2_port *) netdev_priv(dev0))->hw; | 2617 | struct sky2_hw *hw = container_of(napi, struct sky2_hw, napi); |
2611 | int work_done; | ||
2612 | u32 status = sky2_read32(hw, B0_Y2_SP_EISR); | 2618 | u32 status = sky2_read32(hw, B0_Y2_SP_EISR); |
2619 | int work_done; | ||
2613 | 2620 | ||
2614 | if (unlikely(status & Y2_IS_ERROR)) | 2621 | if (unlikely(status & Y2_IS_ERROR)) |
2615 | sky2_err_intr(hw, status); | 2622 | sky2_err_intr(hw, status); |
@@ -2620,31 +2627,27 @@ static int sky2_poll(struct net_device *dev0, int *budget) | |||
2620 | if (status & Y2_IS_IRQ_PHY2) | 2627 | if (status & Y2_IS_IRQ_PHY2) |
2621 | sky2_phy_intr(hw, 1); | 2628 | sky2_phy_intr(hw, 1); |
2622 | 2629 | ||
2623 | work_done = sky2_status_intr(hw, min(dev0->quota, *budget)); | 2630 | work_done = sky2_status_intr(hw, work_limit); |
2624 | *budget -= work_done; | ||
2625 | dev0->quota -= work_done; | ||
2626 | 2631 | ||
2627 | /* More work? */ | 2632 | /* More work? */ |
2628 | if (hw->st_idx != sky2_read16(hw, STAT_PUT_IDX)) | 2633 | if (hw->st_idx == sky2_read16(hw, STAT_PUT_IDX)) { |
2629 | return 1; | 2634 | /* Bug/Errata workaround? |
2635 | * Need to kick the TX irq moderation timer. | ||
2636 | */ | ||
2637 | if (sky2_read8(hw, STAT_TX_TIMER_CTRL) == TIM_START) { | ||
2638 | sky2_write8(hw, STAT_TX_TIMER_CTRL, TIM_STOP); | ||
2639 | sky2_write8(hw, STAT_TX_TIMER_CTRL, TIM_START); | ||
2640 | } | ||
2630 | 2641 | ||
2631 | /* Bug/Errata workaround? | 2642 | napi_complete(napi); |
2632 | * Need to kick the TX irq moderation timer. | 2643 | sky2_read32(hw, B0_Y2_SP_LISR); |
2633 | */ | ||
2634 | if (sky2_read8(hw, STAT_TX_TIMER_CTRL) == TIM_START) { | ||
2635 | sky2_write8(hw, STAT_TX_TIMER_CTRL, TIM_STOP); | ||
2636 | sky2_write8(hw, STAT_TX_TIMER_CTRL, TIM_START); | ||
2637 | } | 2644 | } |
2638 | netif_rx_complete(dev0); | 2645 | return work_done; |
2639 | |||
2640 | sky2_read32(hw, B0_Y2_SP_LISR); | ||
2641 | return 0; | ||
2642 | } | 2646 | } |
2643 | 2647 | ||
2644 | static irqreturn_t sky2_intr(int irq, void *dev_id) | 2648 | static irqreturn_t sky2_intr(int irq, void *dev_id) |
2645 | { | 2649 | { |
2646 | struct sky2_hw *hw = dev_id; | 2650 | struct sky2_hw *hw = dev_id; |
2647 | struct net_device *dev0 = hw->dev[0]; | ||
2648 | u32 status; | 2651 | u32 status; |
2649 | 2652 | ||
2650 | /* Reading this mask interrupts as side effect */ | 2653 | /* Reading this mask interrupts as side effect */ |
@@ -2653,8 +2656,8 @@ static irqreturn_t sky2_intr(int irq, void *dev_id) | |||
2653 | return IRQ_NONE; | 2656 | return IRQ_NONE; |
2654 | 2657 | ||
2655 | prefetch(&hw->st_le[hw->st_idx]); | 2658 | prefetch(&hw->st_le[hw->st_idx]); |
2656 | if (likely(__netif_rx_schedule_prep(dev0))) | 2659 | |
2657 | __netif_rx_schedule(dev0); | 2660 | napi_schedule(&hw->napi); |
2658 | 2661 | ||
2659 | return IRQ_HANDLED; | 2662 | return IRQ_HANDLED; |
2660 | } | 2663 | } |
@@ -2663,10 +2666,8 @@ static irqreturn_t sky2_intr(int irq, void *dev_id) | |||
2663 | static void sky2_netpoll(struct net_device *dev) | 2666 | static void sky2_netpoll(struct net_device *dev) |
2664 | { | 2667 | { |
2665 | struct sky2_port *sky2 = netdev_priv(dev); | 2668 | struct sky2_port *sky2 = netdev_priv(dev); |
2666 | struct net_device *dev0 = sky2->hw->dev[0]; | ||
2667 | 2669 | ||
2668 | if (netif_running(dev) && __netif_rx_schedule_prep(dev0)) | 2670 | napi_schedule(&sky2->hw->napi); |
2669 | __netif_rx_schedule(dev0); | ||
2670 | } | 2671 | } |
2671 | #endif | 2672 | #endif |
2672 | 2673 | ||
@@ -2914,8 +2915,6 @@ static void sky2_restart(struct work_struct *work) | |||
2914 | sky2_write32(hw, B0_IMSK, 0); | 2915 | sky2_write32(hw, B0_IMSK, 0); |
2915 | sky2_read32(hw, B0_IMSK); | 2916 | sky2_read32(hw, B0_IMSK); |
2916 | 2917 | ||
2917 | netif_poll_disable(hw->dev[0]); | ||
2918 | |||
2919 | for (i = 0; i < hw->ports; i++) { | 2918 | for (i = 0; i < hw->ports; i++) { |
2920 | dev = hw->dev[i]; | 2919 | dev = hw->dev[i]; |
2921 | if (netif_running(dev)) | 2920 | if (netif_running(dev)) |
@@ -2924,7 +2923,6 @@ static void sky2_restart(struct work_struct *work) | |||
2924 | 2923 | ||
2925 | sky2_reset(hw); | 2924 | sky2_reset(hw); |
2926 | sky2_write32(hw, B0_IMSK, Y2_IS_BASE); | 2925 | sky2_write32(hw, B0_IMSK, Y2_IS_BASE); |
2927 | netif_poll_enable(hw->dev[0]); | ||
2928 | 2926 | ||
2929 | for (i = 0; i < hw->ports; i++) { | 2927 | for (i = 0; i < hw->ports; i++) { |
2930 | dev = hw->dev[i]; | 2928 | dev = hw->dev[i]; |
@@ -3735,7 +3733,7 @@ static int sky2_debug_show(struct seq_file *seq, void *v) | |||
3735 | { | 3733 | { |
3736 | struct net_device *dev = seq->private; | 3734 | struct net_device *dev = seq->private; |
3737 | const struct sky2_port *sky2 = netdev_priv(dev); | 3735 | const struct sky2_port *sky2 = netdev_priv(dev); |
3738 | const struct sky2_hw *hw = sky2->hw; | 3736 | struct sky2_hw *hw = sky2->hw; |
3739 | unsigned port = sky2->port; | 3737 | unsigned port = sky2->port; |
3740 | unsigned idx, last; | 3738 | unsigned idx, last; |
3741 | int sop; | 3739 | int sop; |
@@ -3748,7 +3746,7 @@ static int sky2_debug_show(struct seq_file *seq, void *v) | |||
3748 | sky2_read32(hw, B0_IMSK), | 3746 | sky2_read32(hw, B0_IMSK), |
3749 | sky2_read32(hw, B0_Y2_SP_ICR)); | 3747 | sky2_read32(hw, B0_Y2_SP_ICR)); |
3750 | 3748 | ||
3751 | netif_poll_disable(hw->dev[0]); | 3749 | napi_disable(&hw->napi); |
3752 | last = sky2_read16(hw, STAT_PUT_IDX); | 3750 | last = sky2_read16(hw, STAT_PUT_IDX); |
3753 | 3751 | ||
3754 | if (hw->st_idx == last) | 3752 | if (hw->st_idx == last) |
@@ -3818,7 +3816,7 @@ static int sky2_debug_show(struct seq_file *seq, void *v) | |||
3818 | last = sky2_read16(hw, Y2_QADDR(rxqaddr[port], PREF_UNIT_PUT_IDX)), | 3816 | last = sky2_read16(hw, Y2_QADDR(rxqaddr[port], PREF_UNIT_PUT_IDX)), |
3819 | sky2_read16(hw, Y2_QADDR(rxqaddr[port], PREF_UNIT_LAST_IDX))); | 3817 | sky2_read16(hw, Y2_QADDR(rxqaddr[port], PREF_UNIT_LAST_IDX))); |
3820 | 3818 | ||
3821 | netif_poll_enable(hw->dev[0]); | 3819 | napi_enable(&hw->napi); |
3822 | return 0; | 3820 | return 0; |
3823 | } | 3821 | } |
3824 | 3822 | ||
@@ -3943,15 +3941,8 @@ static __devinit struct net_device *sky2_init_netdev(struct sky2_hw *hw, | |||
3943 | SET_ETHTOOL_OPS(dev, &sky2_ethtool_ops); | 3941 | SET_ETHTOOL_OPS(dev, &sky2_ethtool_ops); |
3944 | dev->tx_timeout = sky2_tx_timeout; | 3942 | dev->tx_timeout = sky2_tx_timeout; |
3945 | dev->watchdog_timeo = TX_WATCHDOG; | 3943 | dev->watchdog_timeo = TX_WATCHDOG; |
3946 | if (port == 0) | ||
3947 | dev->poll = sky2_poll; | ||
3948 | dev->weight = NAPI_WEIGHT; | ||
3949 | #ifdef CONFIG_NET_POLL_CONTROLLER | 3944 | #ifdef CONFIG_NET_POLL_CONTROLLER |
3950 | /* Network console (only works on port 0) | 3945 | dev->poll_controller = sky2_netpoll; |
3951 | * because netpoll makes assumptions about NAPI | ||
3952 | */ | ||
3953 | if (port == 0) | ||
3954 | dev->poll_controller = sky2_netpoll; | ||
3955 | #endif | 3946 | #endif |
3956 | 3947 | ||
3957 | sky2 = netdev_priv(dev); | 3948 | sky2 = netdev_priv(dev); |
@@ -4166,6 +4157,7 @@ static int __devinit sky2_probe(struct pci_dev *pdev, | |||
4166 | err = -ENOMEM; | 4157 | err = -ENOMEM; |
4167 | goto err_out_free_pci; | 4158 | goto err_out_free_pci; |
4168 | } | 4159 | } |
4160 | netif_napi_add(dev, &hw->napi, sky2_poll, NAPI_WEIGHT); | ||
4169 | 4161 | ||
4170 | if (!disable_msi && pci_enable_msi(pdev) == 0) { | 4162 | if (!disable_msi && pci_enable_msi(pdev) == 0) { |
4171 | err = sky2_test_msi(hw); | 4163 | err = sky2_test_msi(hw); |
@@ -4288,8 +4280,6 @@ static int sky2_suspend(struct pci_dev *pdev, pm_message_t state) | |||
4288 | if (!hw) | 4280 | if (!hw) |
4289 | return 0; | 4281 | return 0; |
4290 | 4282 | ||
4291 | netif_poll_disable(hw->dev[0]); | ||
4292 | |||
4293 | for (i = 0; i < hw->ports; i++) { | 4283 | for (i = 0; i < hw->ports; i++) { |
4294 | struct net_device *dev = hw->dev[i]; | 4284 | struct net_device *dev = hw->dev[i]; |
4295 | struct sky2_port *sky2 = netdev_priv(dev); | 4285 | struct sky2_port *sky2 = netdev_priv(dev); |
@@ -4356,8 +4346,6 @@ static int sky2_resume(struct pci_dev *pdev) | |||
4356 | } | 4346 | } |
4357 | } | 4347 | } |
4358 | 4348 | ||
4359 | netif_poll_enable(hw->dev[0]); | ||
4360 | |||
4361 | return 0; | 4349 | return 0; |
4362 | out: | 4350 | out: |
4363 | dev_err(&pdev->dev, "resume failed (%d)\n", err); | 4351 | dev_err(&pdev->dev, "resume failed (%d)\n", err); |
@@ -4374,7 +4362,7 @@ static void sky2_shutdown(struct pci_dev *pdev) | |||
4374 | if (!hw) | 4362 | if (!hw) |
4375 | return; | 4363 | return; |
4376 | 4364 | ||
4377 | netif_poll_disable(hw->dev[0]); | 4365 | napi_disable(&hw->napi); |
4378 | 4366 | ||
4379 | for (i = 0; i < hw->ports; i++) { | 4367 | for (i = 0; i < hw->ports; i++) { |
4380 | struct net_device *dev = hw->dev[i]; | 4368 | struct net_device *dev = hw->dev[i]; |
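
The sky2 conversion above shows the shape shared by every poll routine in this series: the napi_struct is embedded in driver-private state, recovered with container_of(), and the handler returns how much work it did rather than a 0/1 "reschedule me" flag. A minimal sketch of that skeleton; my_hw, my_rx_process and my_enable_irqs are hypothetical stand-ins for the driver specifics, not part of the patch:

    #include <linux/netdevice.h>

    struct my_hw {
            struct napi_struct napi;
            /* ... device registers, rings ... */
    };

    static int my_rx_process(struct my_hw *hw, int budget);
    static void my_enable_irqs(struct my_hw *hw);

    static int my_poll(struct napi_struct *napi, int budget)
    {
            struct my_hw *hw = container_of(napi, struct my_hw, napi);
            int work_done;

            /* process at most 'budget' packets */
            work_done = my_rx_process(hw, budget);

            if (work_done < budget) {
                    /* ring drained: leave polled mode, unmask device IRQs */
                    napi_complete(napi);
                    my_enable_irqs(hw);
            }
            return work_done;
    }
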
diff --git a/drivers/net/sky2.h b/drivers/net/sky2.h index 8bc5c54e3efa..f18f8752118e 100644 --- a/drivers/net/sky2.h +++ b/drivers/net/sky2.h | |||
@@ -2057,6 +2057,7 @@ struct sky2_port { | |||
2057 | struct sky2_hw { | 2057 | struct sky2_hw { |
2058 | void __iomem *regs; | 2058 | void __iomem *regs; |
2059 | struct pci_dev *pdev; | 2059 | struct pci_dev *pdev; |
2060 | struct napi_struct napi; | ||
2060 | struct net_device *dev[2]; | 2061 | struct net_device *dev[2]; |
2061 | unsigned long flags; | 2062 | unsigned long flags; |
2062 | #define SKY2_HW_USE_MSI 0x00000001 | 2063 | #define SKY2_HW_USE_MSI 0x00000001 |
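
Note where the napi_struct landed: in struct sky2_hw rather than in per-port state, since both ports share one status ring. Registration happens once at probe time; a hedged sketch of the call, using the names the patch itself introduces:

    netif_napi_add(dev, &hw->napi, sky2_poll, NAPI_WEIGHT);

The weight argument carries the same value the removed dev->weight did, i.e. the per-poll packet budget the core will offer.
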
diff --git a/drivers/net/spider_net.c b/drivers/net/spider_net.c index 82d837ab4db9..6d8f2bb7e0f9 100644 --- a/drivers/net/spider_net.c +++ b/drivers/net/spider_net.c | |||
@@ -1278,34 +1278,26 @@ bad_desc: | |||
1278 | * (using netif_receive_skb). If all/enough packets are up, the driver | 1278 | * (using netif_receive_skb). If all/enough packets are up, the driver |
1279 | * reenables interrupts and returns 0. If not, 1 is returned. | 1279 | * reenables interrupts and completes the poll; the number of packets handled is returned. |
1280 | */ | 1280 | */ |
1281 | static int | 1281 | static int spider_net_poll(struct napi_struct *napi, int budget) |
1282 | spider_net_poll(struct net_device *netdev, int *budget) | ||
1283 | { | 1282 | { |
1284 | struct spider_net_card *card = netdev_priv(netdev); | 1283 | struct spider_net_card *card = container_of(napi, struct spider_net_card, napi); |
1285 | int packets_to_do, packets_done = 0; | 1284 | struct net_device *netdev = card->netdev; |
1286 | int no_more_packets = 0; | 1285 | int packets_done = 0; |
1287 | 1286 | ||
1288 | packets_to_do = min(*budget, netdev->quota); | 1287 | while (packets_done < budget) { |
1289 | 1288 | if (!spider_net_decode_one_descr(card)) | |
1290 | while (packets_to_do) { | ||
1291 | if (spider_net_decode_one_descr(card)) { | ||
1292 | packets_done++; | ||
1293 | packets_to_do--; | ||
1294 | } else { | ||
1295 | /* no more packets for the stack */ | ||
1296 | no_more_packets = 1; | ||
1297 | break; | 1289 | break; |
1298 | } | 1290 | |
1291 | packets_done++; | ||
1299 | } | 1292 | } |
1300 | 1293 | ||
1301 | if ((packets_done == 0) && (card->num_rx_ints != 0)) { | 1294 | if ((packets_done == 0) && (card->num_rx_ints != 0)) { |
1302 | no_more_packets = spider_net_resync_tail_ptr(card); | 1295 | if (!spider_net_resync_tail_ptr(card)) |
1296 | packets_done = budget; | ||
1303 | spider_net_resync_head_ptr(card); | 1297 | spider_net_resync_head_ptr(card); |
1304 | } | 1298 | } |
1305 | card->num_rx_ints = 0; | 1299 | card->num_rx_ints = 0; |
1306 | 1300 | ||
1307 | netdev->quota -= packets_done; | ||
1308 | *budget -= packets_done; | ||
1309 | spider_net_refill_rx_chain(card); | 1301 | spider_net_refill_rx_chain(card); |
1310 | spider_net_enable_rxdmac(card); | 1302 | spider_net_enable_rxdmac(card); |
1311 | 1303 | ||
@@ -1313,14 +1305,13 @@ spider_net_poll(struct net_device *netdev, int *budget) | |||
1313 | 1305 | ||
1314 | /* if all packets are in the stack, enable interrupts and return 0 */ | 1306 | /* if all packets are in the stack, enable interrupts and complete */ |
1315 | /* if not, return 1 */ | 1307 | /* either way, return the number of packets handled */ |
1316 | if (no_more_packets) { | 1308 | if (packets_done < budget) { |
1317 | netif_rx_complete(netdev); | 1309 | netif_rx_complete(netdev, napi); |
1318 | spider_net_rx_irq_on(card); | 1310 | spider_net_rx_irq_on(card); |
1319 | card->ignore_rx_ramfull = 0; | 1311 | card->ignore_rx_ramfull = 0; |
1320 | return 0; | ||
1321 | } | 1312 | } |
1322 | 1313 | ||
1323 | return 1; | 1314 | return packets_done; |
1324 | } | 1315 | } |
1325 | 1316 | ||
1326 | /** | 1317 | /** |
@@ -1560,7 +1551,8 @@ spider_net_handle_error_irq(struct spider_net_card *card, u32 status_reg) | |||
1560 | spider_net_refill_rx_chain(card); | 1551 | spider_net_refill_rx_chain(card); |
1561 | spider_net_enable_rxdmac(card); | 1552 | spider_net_enable_rxdmac(card); |
1562 | card->num_rx_ints ++; | 1553 | card->num_rx_ints ++; |
1563 | netif_rx_schedule(card->netdev); | 1554 | netif_rx_schedule(card->netdev, |
1555 | &card->napi); | ||
1564 | } | 1556 | } |
1565 | show_error = 0; | 1557 | show_error = 0; |
1566 | break; | 1558 | break; |
@@ -1580,7 +1572,8 @@ spider_net_handle_error_irq(struct spider_net_card *card, u32 status_reg) | |||
1580 | spider_net_refill_rx_chain(card); | 1572 | spider_net_refill_rx_chain(card); |
1581 | spider_net_enable_rxdmac(card); | 1573 | spider_net_enable_rxdmac(card); |
1582 | card->num_rx_ints ++; | 1574 | card->num_rx_ints ++; |
1583 | netif_rx_schedule(card->netdev); | 1575 | netif_rx_schedule(card->netdev, |
1576 | &card->napi); | ||
1584 | show_error = 0; | 1577 | show_error = 0; |
1585 | break; | 1578 | break; |
1586 | 1579 | ||
@@ -1594,7 +1587,8 @@ spider_net_handle_error_irq(struct spider_net_card *card, u32 status_reg) | |||
1594 | spider_net_refill_rx_chain(card); | 1587 | spider_net_refill_rx_chain(card); |
1595 | spider_net_enable_rxdmac(card); | 1588 | spider_net_enable_rxdmac(card); |
1596 | card->num_rx_ints ++; | 1589 | card->num_rx_ints ++; |
1597 | netif_rx_schedule(card->netdev); | 1590 | netif_rx_schedule(card->netdev, |
1591 | &card->napi); | ||
1598 | show_error = 0; | 1592 | show_error = 0; |
1599 | break; | 1593 | break; |
1600 | 1594 | ||
@@ -1686,11 +1680,11 @@ spider_net_interrupt(int irq, void *ptr) | |||
1686 | 1680 | ||
1687 | if (status_reg & SPIDER_NET_RXINT ) { | 1681 | if (status_reg & SPIDER_NET_RXINT ) { |
1688 | spider_net_rx_irq_off(card); | 1682 | spider_net_rx_irq_off(card); |
1689 | netif_rx_schedule(netdev); | 1683 | netif_rx_schedule(netdev, &card->napi); |
1690 | card->num_rx_ints ++; | 1684 | card->num_rx_ints ++; |
1691 | } | 1685 | } |
1692 | if (status_reg & SPIDER_NET_TXINT) | 1686 | if (status_reg & SPIDER_NET_TXINT) |
1693 | netif_rx_schedule(netdev); | 1687 | netif_rx_schedule(netdev, &card->napi); |
1694 | 1688 | ||
1695 | if (status_reg & SPIDER_NET_LINKINT) | 1689 | if (status_reg & SPIDER_NET_LINKINT) |
1696 | spider_net_link_reset(netdev); | 1690 | spider_net_link_reset(netdev); |
@@ -2034,7 +2028,7 @@ spider_net_open(struct net_device *netdev) | |||
2034 | 2028 | ||
2035 | netif_start_queue(netdev); | 2029 | netif_start_queue(netdev); |
2036 | netif_carrier_on(netdev); | 2030 | netif_carrier_on(netdev); |
2037 | netif_poll_enable(netdev); | 2031 | napi_enable(&card->napi); |
2038 | 2032 | ||
2039 | spider_net_enable_interrupts(card); | 2033 | spider_net_enable_interrupts(card); |
2040 | 2034 | ||
@@ -2204,7 +2198,7 @@ spider_net_stop(struct net_device *netdev) | |||
2204 | { | 2198 | { |
2205 | struct spider_net_card *card = netdev_priv(netdev); | 2199 | struct spider_net_card *card = netdev_priv(netdev); |
2206 | 2200 | ||
2207 | netif_poll_disable(netdev); | 2201 | napi_disable(&card->napi); |
2208 | netif_carrier_off(netdev); | 2202 | netif_carrier_off(netdev); |
2209 | netif_stop_queue(netdev); | 2203 | netif_stop_queue(netdev); |
2210 | del_timer_sync(&card->tx_timer); | 2204 | del_timer_sync(&card->tx_timer); |
@@ -2304,9 +2298,6 @@ spider_net_setup_netdev_ops(struct net_device *netdev) | |||
2304 | /* tx watchdog */ | 2298 | /* tx watchdog */ |
2305 | netdev->tx_timeout = &spider_net_tx_timeout; | 2299 | netdev->tx_timeout = &spider_net_tx_timeout; |
2306 | netdev->watchdog_timeo = SPIDER_NET_WATCHDOG_TIMEOUT; | 2300 | netdev->watchdog_timeo = SPIDER_NET_WATCHDOG_TIMEOUT; |
2307 | /* NAPI */ | ||
2308 | netdev->poll = &spider_net_poll; | ||
2309 | netdev->weight = SPIDER_NET_NAPI_WEIGHT; | ||
2310 | /* HW VLAN */ | 2301 | /* HW VLAN */ |
2311 | #ifdef CONFIG_NET_POLL_CONTROLLER | 2302 | #ifdef CONFIG_NET_POLL_CONTROLLER |
2312 | /* poll controller */ | 2303 | /* poll controller */ |
@@ -2351,6 +2342,9 @@ spider_net_setup_netdev(struct spider_net_card *card) | |||
2351 | 2342 | ||
2352 | card->options.rx_csum = SPIDER_NET_RX_CSUM_DEFAULT; | 2343 | card->options.rx_csum = SPIDER_NET_RX_CSUM_DEFAULT; |
2353 | 2344 | ||
2345 | netif_napi_add(netdev, &card->napi, | ||
2346 | spider_net_poll, SPIDER_NET_NAPI_WEIGHT); | ||
2347 | |||
2354 | spider_net_setup_netdev_ops(netdev); | 2348 | spider_net_setup_netdev_ops(netdev); |
2355 | 2349 | ||
2356 | netdev->features = NETIF_F_IP_CSUM | NETIF_F_LLTX; | 2350 | netdev->features = NETIF_F_IP_CSUM | NETIF_F_LLTX; |
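
On the interrupt side, spider_net now hands the napi_struct to the scheduling helpers instead of letting the core dig it out of the net_device. A minimal sketch of the 2.6.24-era two-argument form; my_card, my_irq_pending and my_disable_rx_irq are illustrative stand-ins:

    struct my_card {
            struct napi_struct napi;
            /* ... */
    };

    static int my_irq_pending(struct my_card *card);
    static void my_disable_rx_irq(struct my_card *card);

    static irqreturn_t my_intr(int irq, void *dev_id)
    {
            struct net_device *dev = dev_id;
            struct my_card *card = netdev_priv(dev);

            if (!my_irq_pending(card))
                    return IRQ_NONE;

            my_disable_rx_irq(card);        /* keep IRQs off until the poll completes */
            netif_rx_schedule(dev, &card->napi);
            return IRQ_HANDLED;
    }
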
diff --git a/drivers/net/spider_net.h b/drivers/net/spider_net.h index dbbdb8cee3c6..a2fcdebc3790 100644 --- a/drivers/net/spider_net.h +++ b/drivers/net/spider_net.h | |||
@@ -466,6 +466,8 @@ struct spider_net_card { | |||
466 | struct pci_dev *pdev; | 466 | struct pci_dev *pdev; |
467 | struct mii_phy phy; | 467 | struct mii_phy phy; |
468 | 468 | ||
469 | struct napi_struct napi; | ||
470 | |||
469 | int medium; | 471 | int medium; |
470 | 472 | ||
471 | void __iomem *regs; | 473 | void __iomem *regs; |
diff --git a/drivers/net/starfire.c b/drivers/net/starfire.c index 8b6478663a56..3b9336c34206 100644 --- a/drivers/net/starfire.c +++ b/drivers/net/starfire.c | |||
@@ -178,16 +178,13 @@ static int full_duplex[MAX_UNITS] = {0, }; | |||
178 | #define skb_num_frags(skb) (skb_shinfo(skb)->nr_frags + 1) | 178 | #define skb_num_frags(skb) (skb_shinfo(skb)->nr_frags + 1) |
179 | 179 | ||
180 | #ifdef HAVE_NETDEV_POLL | 180 | #ifdef HAVE_NETDEV_POLL |
181 | #define init_poll(dev) \ | 181 | #define init_poll(dev, np) \ |
182 | do { \ | 182 | netif_napi_add(dev, &np->napi, netdev_poll, max_interrupt_work) |
183 | dev->poll = &netdev_poll; \ | 183 | #define netdev_rx(dev, np, ioaddr) \ |
184 | dev->weight = max_interrupt_work; \ | ||
185 | } while (0) | ||
186 | #define netdev_rx(dev, ioaddr) \ | ||
187 | do { \ | 184 | do { \ |
188 | u32 intr_enable; \ | 185 | u32 intr_enable; \ |
189 | if (netif_rx_schedule_prep(dev)) { \ | 186 | if (netif_rx_schedule_prep(dev, &np->napi)) { \ |
190 | __netif_rx_schedule(dev); \ | 187 | __netif_rx_schedule(dev, &np->napi); \ |
191 | intr_enable = readl(ioaddr + IntrEnable); \ | 188 | intr_enable = readl(ioaddr + IntrEnable); \ |
192 | intr_enable &= ~(IntrRxDone | IntrRxEmpty); \ | 189 | intr_enable &= ~(IntrRxDone | IntrRxEmpty); \ |
193 | writel(intr_enable, ioaddr + IntrEnable); \ | 190 | writel(intr_enable, ioaddr + IntrEnable); \ |
@@ -204,12 +201,12 @@ do { \ | |||
204 | } while (0) | 201 | } while (0) |
205 | #define netdev_receive_skb(skb) netif_receive_skb(skb) | 202 | #define netdev_receive_skb(skb) netif_receive_skb(skb) |
206 | #define vlan_netdev_receive_skb(skb, vlgrp, vlid) vlan_hwaccel_receive_skb(skb, vlgrp, vlid) | 203 | #define vlan_netdev_receive_skb(skb, vlgrp, vlid) vlan_hwaccel_receive_skb(skb, vlgrp, vlid) |
207 | static int netdev_poll(struct net_device *dev, int *budget); | 204 | static int netdev_poll(struct napi_struct *napi, int budget); |
208 | #else /* not HAVE_NETDEV_POLL */ | 205 | #else /* not HAVE_NETDEV_POLL */ |
209 | #define init_poll(dev) | 206 | #define init_poll(dev, np) |
210 | #define netdev_receive_skb(skb) netif_rx(skb) | 207 | #define netdev_receive_skb(skb) netif_rx(skb) |
211 | #define vlan_netdev_receive_skb(skb, vlgrp, vlid) vlan_hwaccel_rx(skb, vlgrp, vlid) | 208 | #define vlan_netdev_receive_skb(skb, vlgrp, vlid) vlan_hwaccel_rx(skb, vlgrp, vlid) |
212 | #define netdev_rx(dev, ioaddr) \ | 209 | #define netdev_rx(dev, np, ioaddr) \ |
213 | do { \ | 210 | do { \ |
214 | int quota = np->dirty_rx + RX_RING_SIZE - np->cur_rx; \ | 211 | int quota = np->dirty_rx + RX_RING_SIZE - np->cur_rx; \ |
215 | __netdev_rx(dev, &quota);\ | 212 | __netdev_rx(dev, &quota);\
@@ -599,6 +596,8 @@ struct netdev_private { | |||
599 | struct tx_done_desc *tx_done_q; | 596 | struct tx_done_desc *tx_done_q; |
600 | dma_addr_t tx_done_q_dma; | 597 | dma_addr_t tx_done_q_dma; |
601 | unsigned int tx_done; | 598 | unsigned int tx_done; |
599 | struct napi_struct napi; | ||
600 | struct net_device *dev; | ||
602 | struct net_device_stats stats; | 601 | struct net_device_stats stats; |
603 | struct pci_dev *pci_dev; | 602 | struct pci_dev *pci_dev; |
604 | #ifdef VLAN_SUPPORT | 603 | #ifdef VLAN_SUPPORT |
@@ -791,6 +790,7 @@ static int __devinit starfire_init_one(struct pci_dev *pdev, | |||
791 | dev->irq = irq; | 790 | dev->irq = irq; |
792 | 791 | ||
793 | np = netdev_priv(dev); | 792 | np = netdev_priv(dev); |
793 | np->dev = dev; | ||
794 | np->base = base; | 794 | np->base = base; |
795 | spin_lock_init(&np->lock); | 795 | spin_lock_init(&np->lock); |
796 | pci_set_drvdata(pdev, dev); | 796 | pci_set_drvdata(pdev, dev); |
@@ -851,7 +851,7 @@ static int __devinit starfire_init_one(struct pci_dev *pdev, | |||
851 | dev->hard_start_xmit = &start_tx; | 851 | dev->hard_start_xmit = &start_tx; |
852 | dev->tx_timeout = tx_timeout; | 852 | dev->tx_timeout = tx_timeout; |
853 | dev->watchdog_timeo = TX_TIMEOUT; | 853 | dev->watchdog_timeo = TX_TIMEOUT; |
854 | init_poll(dev); | 854 | init_poll(dev, np); |
855 | dev->stop = &netdev_close; | 855 | dev->stop = &netdev_close; |
856 | dev->get_stats = &get_stats; | 856 | dev->get_stats = &get_stats; |
857 | dev->set_multicast_list = &set_rx_mode; | 857 | dev->set_multicast_list = &set_rx_mode; |
@@ -1056,6 +1056,9 @@ static int netdev_open(struct net_device *dev) | |||
1056 | 1056 | ||
1057 | writel(np->intr_timer_ctrl, ioaddr + IntrTimerCtrl); | 1057 | writel(np->intr_timer_ctrl, ioaddr + IntrTimerCtrl); |
1058 | 1058 | ||
1059 | #ifdef HAVE_NETDEV_POLL | ||
1060 | napi_enable(&np->napi); | ||
1061 | #endif | ||
1059 | netif_start_queue(dev); | 1062 | netif_start_queue(dev); |
1060 | 1063 | ||
1061 | if (debug > 1) | 1064 | if (debug > 1) |
@@ -1330,7 +1333,7 @@ static irqreturn_t intr_handler(int irq, void *dev_instance) | |||
1330 | handled = 1; | 1333 | handled = 1; |
1331 | 1334 | ||
1332 | if (intr_status & (IntrRxDone | IntrRxEmpty)) | 1335 | if (intr_status & (IntrRxDone | IntrRxEmpty)) |
1333 | netdev_rx(dev, ioaddr); | 1336 | netdev_rx(dev, np, ioaddr); |
1334 | 1337 | ||
1335 | /* Scavenge the skbuff list based on the Tx-done queue. | 1338 | /* Scavenge the skbuff list based on the Tx-done queue. |
1336 | There are redundant checks here that may be cleaned up | 1339 | There are redundant checks here that may be cleaned up |
@@ -1531,36 +1534,35 @@ static int __netdev_rx(struct net_device *dev, int *quota) | |||
1531 | 1534 | ||
1532 | 1535 | ||
1533 | #ifdef HAVE_NETDEV_POLL | 1536 | #ifdef HAVE_NETDEV_POLL |
1534 | static int netdev_poll(struct net_device *dev, int *budget) | 1537 | static int netdev_poll(struct napi_struct *napi, int budget) |
1535 | { | 1538 | { |
1539 | struct netdev_private *np = container_of(napi, struct netdev_private, napi); | ||
1540 | struct net_device *dev = np->dev; | ||
1536 | u32 intr_status; | 1541 | u32 intr_status; |
1537 | struct netdev_private *np = netdev_priv(dev); | ||
1538 | void __iomem *ioaddr = np->base; | 1542 | void __iomem *ioaddr = np->base; |
1539 | int retcode = 0, quota = dev->quota; | 1543 | int quota = budget; |
1540 | 1544 | ||
1541 | do { | 1545 | do { |
1542 | writel(IntrRxDone | IntrRxEmpty, ioaddr + IntrClear); | 1546 | writel(IntrRxDone | IntrRxEmpty, ioaddr + IntrClear); |
1543 | 1547 | ||
1544 | retcode = __netdev_rx(dev, &quota); | 1548 | if (__netdev_rx(dev, &quota))
1545 | *budget -= (dev->quota - quota); | ||
1546 | dev->quota = quota; | ||
1547 | if (retcode) | ||
1548 | goto out; | 1549 | goto out; |
1549 | 1550 | ||
1550 | intr_status = readl(ioaddr + IntrStatus); | 1551 | intr_status = readl(ioaddr + IntrStatus); |
1551 | } while (intr_status & (IntrRxDone | IntrRxEmpty)); | 1552 | } while (intr_status & (IntrRxDone | IntrRxEmpty)); |
1552 | 1553 | ||
1553 | netif_rx_complete(dev); | 1554 | netif_rx_complete(dev, napi); |
1554 | intr_status = readl(ioaddr + IntrEnable); | 1555 | intr_status = readl(ioaddr + IntrEnable); |
1555 | intr_status |= IntrRxDone | IntrRxEmpty; | 1556 | intr_status |= IntrRxDone | IntrRxEmpty; |
1556 | writel(intr_status, ioaddr + IntrEnable); | 1557 | writel(intr_status, ioaddr + IntrEnable); |
1557 | 1558 | ||
1558 | out: | 1559 | out: |
1559 | if (debug > 5) | 1560 | if (debug > 5) |
1560 | printk(KERN_DEBUG " exiting netdev_poll(): %d.\n", retcode); | 1561 | printk(KERN_DEBUG " exiting netdev_poll(): %d.\n", |
1562 | budget - quota); | ||
1561 | 1563 | ||
1562 | /* Restart Rx engine if stopped. */ | 1564 | /* Restart Rx engine if stopped. */ |
1563 | return retcode; | 1565 | return budget - quota; |
1564 | } | 1566 | } |
1565 | #endif /* HAVE_NETDEV_POLL */ | 1567 | #endif /* HAVE_NETDEV_POLL */ |
1566 | 1568 | ||
@@ -1904,6 +1906,9 @@ static int netdev_close(struct net_device *dev) | |||
1904 | int i; | 1906 | int i; |
1905 | 1907 | ||
1906 | netif_stop_queue(dev); | 1908 | netif_stop_queue(dev); |
1909 | #ifdef HAVE_NETDEV_POLL | ||
1910 | napi_disable(&np->napi); | ||
1911 | #endif | ||
1907 | 1912 | ||
1908 | if (debug > 1) { | 1913 | if (debug > 1) { |
1909 | printk(KERN_DEBUG "%s: Shutting down ethercard, Intr status %#8.8x.\n", | 1914 | printk(KERN_DEBUG "%s: Shutting down ethercard, Intr status %#8.8x.\n", |
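
starfire keeps NAPI compile-time optional, so the new napi_enable()/napi_disable() transitions must live behind the same HAVE_NETDEV_POLL guard as the poll routine itself. A sketch of the open/close bracketing, assuming the np->napi member this patch adds to struct netdev_private:

    static int my_open(struct net_device *dev)
    {
            struct netdev_private *np = netdev_priv(dev);

    #ifdef HAVE_NETDEV_POLL
            napi_enable(&np->napi);         /* before RX interrupts can fire */
    #endif
            netif_start_queue(dev);
            return 0;
    }

    static int my_close(struct net_device *dev)
    {
            struct netdev_private *np = netdev_priv(dev);

            netif_stop_queue(dev);
    #ifdef HAVE_NETDEV_POLL
            napi_disable(&np->napi);        /* blocks until any running poll returns */
    #endif
            return 0;
    }
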
diff --git a/drivers/net/sungem.c b/drivers/net/sungem.c index 432803855034..bf821e96f7b2 100644 --- a/drivers/net/sungem.c +++ b/drivers/net/sungem.c | |||
@@ -19,7 +19,7 @@ | |||
19 | * | 19 | * |
20 | * gem_change_mtu() and gem_set_multicast() are called with a read_lock() | 20 | * gem_change_mtu() and gem_set_multicast() are called with a read_lock() |
21 | * held by net/core/dev.c, thus they can't schedule. That means they can't | 21 | * held by net/core/dev.c, thus they can't schedule. That means they can't |
22 | * call netif_poll_disable() either, thus forcing gem_poll() to keep a spinlock | 22 | * call napi_disable() either, thus forcing gem_poll() to keep a spinlock |
23 | * where it could have been dropped. change_mtu especially would love also to | 23 | * where it could have been dropped. change_mtu especially would love also to |
24 | * be able to msleep instead of horrid locked delays when resetting the HW, | 24 | * be able to msleep instead of horrid locked delays when resetting the HW, |
25 | * but that read_lock() makes it impossible, unless I defer its action to | 25 | * but that read_lock() makes it impossible, unless I defer its action to |
@@ -878,19 +878,20 @@ static int gem_rx(struct gem *gp, int work_to_do) | |||
878 | return work_done; | 878 | return work_done; |
879 | } | 879 | } |
880 | 880 | ||
881 | static int gem_poll(struct net_device *dev, int *budget) | 881 | static int gem_poll(struct napi_struct *napi, int budget) |
882 | { | 882 | { |
883 | struct gem *gp = dev->priv; | 883 | struct gem *gp = container_of(napi, struct gem, napi); |
884 | struct net_device *dev = gp->dev; | ||
884 | unsigned long flags; | 885 | unsigned long flags; |
886 | int work_done; | ||
885 | 887 | ||
886 | /* | 888 | /* |
887 | * NAPI locking nightmare: See comment at head of driver | 889 | * NAPI locking nightmare: See comment at head of driver |
888 | */ | 890 | */ |
889 | spin_lock_irqsave(&gp->lock, flags); | 891 | spin_lock_irqsave(&gp->lock, flags); |
890 | 892 | ||
893 | work_done = 0; | ||
891 | do { | 894 | do { |
892 | int work_to_do, work_done; | ||
893 | |||
894 | /* Handle anomalies */ | 895 | /* Handle anomalies */ |
895 | if (gp->status & GREG_STAT_ABNORMAL) { | 896 | if (gp->status & GREG_STAT_ABNORMAL) { |
896 | if (gem_abnormal_irq(dev, gp, gp->status)) | 897 | if (gem_abnormal_irq(dev, gp, gp->status)) |
@@ -906,29 +907,25 @@ static int gem_poll(struct net_device *dev, int *budget) | |||
906 | 907 | ||
907 | /* Run RX thread. We don't use any locking here, | 908 | /* Run RX thread. We don't use any locking here, |
908 | * code willing to do bad things - like cleaning the | 909 | * code willing to do bad things - like cleaning the |
909 | * rx ring - must call netif_poll_disable(), which | 910 | * rx ring - must call napi_disable(), which |
910 | * schedule_timeout()'s if polling is already disabled. | 911 | * schedule_timeout()'s if polling is already disabled. |
911 | */ | 912 | */ |
912 | work_to_do = min(*budget, dev->quota); | 913 | work_done += gem_rx(gp, budget); |
913 | |||
914 | work_done = gem_rx(gp, work_to_do); | ||
915 | |||
916 | *budget -= work_done; | ||
917 | dev->quota -= work_done; | ||
918 | 914 | ||
919 | if (work_done >= work_to_do) | 915 | if (work_done >= budget) |
920 | return 1; | 916 | return work_done; |
921 | 917 | ||
922 | spin_lock_irqsave(&gp->lock, flags); | 918 | spin_lock_irqsave(&gp->lock, flags); |
923 | 919 | ||
924 | gp->status = readl(gp->regs + GREG_STAT); | 920 | gp->status = readl(gp->regs + GREG_STAT); |
925 | } while (gp->status & GREG_STAT_NAPI); | 921 | } while (gp->status & GREG_STAT_NAPI); |
926 | 922 | ||
927 | __netif_rx_complete(dev); | 923 | __netif_rx_complete(dev, napi); |
928 | gem_enable_ints(gp); | 924 | gem_enable_ints(gp); |
929 | 925 | ||
930 | spin_unlock_irqrestore(&gp->lock, flags); | 926 | spin_unlock_irqrestore(&gp->lock, flags); |
931 | return 0; | 927 | |
928 | return work_done; | ||
932 | } | 929 | } |
933 | 930 | ||
934 | static irqreturn_t gem_interrupt(int irq, void *dev_id) | 931 | static irqreturn_t gem_interrupt(int irq, void *dev_id) |
@@ -946,17 +943,17 @@ static irqreturn_t gem_interrupt(int irq, void *dev_id) | |||
946 | 943 | ||
947 | spin_lock_irqsave(&gp->lock, flags); | 944 | spin_lock_irqsave(&gp->lock, flags); |
948 | 945 | ||
949 | if (netif_rx_schedule_prep(dev)) { | 946 | if (netif_rx_schedule_prep(dev, &gp->napi)) { |
950 | u32 gem_status = readl(gp->regs + GREG_STAT); | 947 | u32 gem_status = readl(gp->regs + GREG_STAT); |
951 | 948 | ||
952 | if (gem_status == 0) { | 949 | if (gem_status == 0) { |
953 | netif_poll_enable(dev); | 950 | napi_enable(&gp->napi); |
954 | spin_unlock_irqrestore(&gp->lock, flags); | 951 | spin_unlock_irqrestore(&gp->lock, flags); |
955 | return IRQ_NONE; | 952 | return IRQ_NONE; |
956 | } | 953 | } |
957 | gp->status = gem_status; | 954 | gp->status = gem_status; |
958 | gem_disable_ints(gp); | 955 | gem_disable_ints(gp); |
959 | __netif_rx_schedule(dev); | 956 | __netif_rx_schedule(dev, &gp->napi); |
960 | } | 957 | } |
961 | 958 | ||
962 | spin_unlock_irqrestore(&gp->lock, flags); | 959 | spin_unlock_irqrestore(&gp->lock, flags); |
@@ -2284,7 +2281,7 @@ static void gem_reset_task(struct work_struct *work) | |||
2284 | 2281 | ||
2285 | mutex_lock(&gp->pm_mutex); | 2282 | mutex_lock(&gp->pm_mutex); |
2286 | 2283 | ||
2287 | netif_poll_disable(gp->dev); | 2284 | napi_disable(&gp->napi); |
2288 | 2285 | ||
2289 | spin_lock_irq(&gp->lock); | 2286 | spin_lock_irq(&gp->lock); |
2290 | spin_lock(&gp->tx_lock); | 2287 | spin_lock(&gp->tx_lock); |
@@ -2307,7 +2304,7 @@ static void gem_reset_task(struct work_struct *work) | |||
2307 | spin_unlock(&gp->tx_lock); | 2304 | spin_unlock(&gp->tx_lock); |
2308 | spin_unlock_irq(&gp->lock); | 2305 | spin_unlock_irq(&gp->lock); |
2309 | 2306 | ||
2310 | netif_poll_enable(gp->dev); | 2307 | napi_enable(&gp->napi); |
2311 | 2308 | ||
2312 | mutex_unlock(&gp->pm_mutex); | 2309 | mutex_unlock(&gp->pm_mutex); |
2313 | } | 2310 | } |
@@ -2324,6 +2321,8 @@ static int gem_open(struct net_device *dev) | |||
2324 | if (!gp->asleep) | 2321 | if (!gp->asleep) |
2325 | rc = gem_do_start(dev); | 2322 | rc = gem_do_start(dev); |
2326 | gp->opened = (rc == 0); | 2323 | gp->opened = (rc == 0); |
2324 | if (gp->opened) | ||
2325 | napi_enable(&gp->napi); | ||
2327 | 2326 | ||
2328 | mutex_unlock(&gp->pm_mutex); | 2327 | mutex_unlock(&gp->pm_mutex); |
2329 | 2328 | ||
@@ -2334,9 +2333,7 @@ static int gem_close(struct net_device *dev) | |||
2334 | { | 2333 | { |
2335 | struct gem *gp = dev->priv; | 2334 | struct gem *gp = dev->priv; |
2336 | 2335 | ||
2337 | /* Note: we don't need to call netif_poll_disable() here because | 2336 | napi_disable(&gp->napi); |
2338 | * our caller (dev_close) already did it for us | ||
2339 | */ | ||
2340 | 2337 | ||
2341 | mutex_lock(&gp->pm_mutex); | 2338 | mutex_lock(&gp->pm_mutex); |
2342 | 2339 | ||
@@ -2358,7 +2355,7 @@ static int gem_suspend(struct pci_dev *pdev, pm_message_t state) | |||
2358 | 2355 | ||
2359 | mutex_lock(&gp->pm_mutex); | 2356 | mutex_lock(&gp->pm_mutex); |
2360 | 2357 | ||
2361 | netif_poll_disable(dev); | 2358 | napi_disable(&gp->napi); |
2362 | 2359 | ||
2363 | printk(KERN_INFO "%s: suspending, WakeOnLan %s\n", | 2360 | printk(KERN_INFO "%s: suspending, WakeOnLan %s\n", |
2364 | dev->name, | 2361 | dev->name, |
@@ -2482,7 +2479,7 @@ static int gem_resume(struct pci_dev *pdev) | |||
2482 | spin_unlock(&gp->tx_lock); | 2479 | spin_unlock(&gp->tx_lock); |
2483 | spin_unlock_irqrestore(&gp->lock, flags); | 2480 | spin_unlock_irqrestore(&gp->lock, flags); |
2484 | 2481 | ||
2485 | netif_poll_enable(dev); | 2482 | napi_enable(&gp->napi); |
2486 | 2483 | ||
2487 | mutex_unlock(&gp->pm_mutex); | 2484 | mutex_unlock(&gp->pm_mutex); |
2488 | 2485 | ||
@@ -3121,8 +3118,7 @@ static int __devinit gem_init_one(struct pci_dev *pdev, | |||
3121 | dev->get_stats = gem_get_stats; | 3118 | dev->get_stats = gem_get_stats; |
3122 | dev->set_multicast_list = gem_set_multicast; | 3119 | dev->set_multicast_list = gem_set_multicast; |
3123 | dev->do_ioctl = gem_ioctl; | 3120 | dev->do_ioctl = gem_ioctl; |
3124 | dev->poll = gem_poll; | 3121 | netif_napi_add(dev, &gp->napi, gem_poll, 64); |
3125 | dev->weight = 64; | ||
3126 | dev->ethtool_ops = &gem_ethtool_ops; | 3122 | dev->ethtool_ops = &gem_ethtool_ops; |
3127 | dev->tx_timeout = gem_tx_timeout; | 3123 | dev->tx_timeout = gem_tx_timeout; |
3128 | dev->watchdog_timeo = 5 * HZ; | 3124 | dev->watchdog_timeo = 5 * HZ; |
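
For sungem the important property is that napi_disable() is a quiesce primitive: it sleeps until any in-flight poll has finished, which is why it can replace netif_poll_disable() at every point that resets the chip or rebuilds the rings. A sketch of the pattern, assuming a reset_task work item like the one sungem already carries (my_hw_reset is illustrative):

    static void my_hw_reset(struct gem *gp);

    static void my_reset_task(struct work_struct *work)
    {
            struct gem *gp = container_of(work, struct gem, reset_task);

            napi_disable(&gp->napi);        /* no poll can be running past here */
            my_hw_reset(gp);                /* safe to touch rings and registers */
            napi_enable(&gp->napi);         /* must pair with the disable above */
    }

Note also that gem_close() now calls napi_disable() explicitly; with the napi_struct decoupled from the net_device, dev_close() no longer does it on the driver's behalf.
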
diff --git a/drivers/net/sungem.h b/drivers/net/sungem.h index 58cf87c5751e..76d760acc9e2 100644 --- a/drivers/net/sungem.h +++ b/drivers/net/sungem.h | |||
@@ -993,6 +993,7 @@ struct gem { | |||
993 | u32 msg_enable; | 993 | u32 msg_enable; |
994 | u32 status; | 994 | u32 status; |
995 | 995 | ||
996 | struct napi_struct napi; | ||
996 | struct net_device_stats net_stats; | 997 | struct net_device_stats net_stats; |
997 | 998 | ||
998 | int tx_fifo_sz; | 999 | int tx_fifo_sz; |
diff --git a/drivers/net/tc35815.c b/drivers/net/tc35815.c index ec41469eee82..b5e0dff67230 100644 --- a/drivers/net/tc35815.c +++ b/drivers/net/tc35815.c | |||
@@ -414,6 +414,9 @@ enum tc35815_timer_state { | |||
414 | struct tc35815_local { | 414 | struct tc35815_local { |
415 | struct pci_dev *pci_dev; | 415 | struct pci_dev *pci_dev; |
416 | 416 | ||
417 | struct net_device *dev; | ||
418 | struct napi_struct napi; | ||
419 | |||
417 | /* statistics */ | 420 | /* statistics */ |
418 | struct net_device_stats stats; | 421 | struct net_device_stats stats; |
419 | struct { | 422 | struct { |
@@ -566,7 +569,7 @@ static int tc35815_send_packet(struct sk_buff *skb, struct net_device *dev); | |||
566 | static irqreturn_t tc35815_interrupt(int irq, void *dev_id); | 569 | static irqreturn_t tc35815_interrupt(int irq, void *dev_id); |
567 | #ifdef TC35815_NAPI | 570 | #ifdef TC35815_NAPI |
568 | static int tc35815_rx(struct net_device *dev, int limit); | 571 | static int tc35815_rx(struct net_device *dev, int limit); |
569 | static int tc35815_poll(struct net_device *dev, int *budget); | 572 | static int tc35815_poll(struct napi_struct *napi, int budget); |
570 | #else | 573 | #else |
571 | static void tc35815_rx(struct net_device *dev); | 574 | static void tc35815_rx(struct net_device *dev); |
572 | #endif | 575 | #endif |
@@ -685,6 +688,7 @@ static int __devinit tc35815_init_one (struct pci_dev *pdev, | |||
685 | SET_MODULE_OWNER(dev); | 688 | SET_MODULE_OWNER(dev); |
686 | SET_NETDEV_DEV(dev, &pdev->dev); | 689 | SET_NETDEV_DEV(dev, &pdev->dev); |
687 | lp = dev->priv; | 690 | lp = dev->priv; |
691 | lp->dev = dev; | ||
688 | 692 | ||
689 | /* enable device (incl. PCI PM wakeup), and bus-mastering */ | 693 | /* enable device (incl. PCI PM wakeup), and bus-mastering */ |
690 | rc = pci_enable_device (pdev); | 694 | rc = pci_enable_device (pdev); |
@@ -738,8 +742,7 @@ static int __devinit tc35815_init_one (struct pci_dev *pdev, | |||
738 | dev->tx_timeout = tc35815_tx_timeout; | 742 | dev->tx_timeout = tc35815_tx_timeout; |
739 | dev->watchdog_timeo = TC35815_TX_TIMEOUT; | 743 | dev->watchdog_timeo = TC35815_TX_TIMEOUT; |
740 | #ifdef TC35815_NAPI | 744 | #ifdef TC35815_NAPI |
741 | dev->poll = tc35815_poll; | 745 | netif_napi_add(dev, &lp->napi, tc35815_poll, NAPI_WEIGHT); |
742 | dev->weight = NAPI_WEIGHT; | ||
743 | #endif | 746 | #endif |
744 | #ifdef CONFIG_NET_POLL_CONTROLLER | 747 | #ifdef CONFIG_NET_POLL_CONTROLLER |
745 | dev->poll_controller = tc35815_poll_controller; | 748 | dev->poll_controller = tc35815_poll_controller; |
@@ -748,8 +751,6 @@ static int __devinit tc35815_init_one (struct pci_dev *pdev, | |||
748 | dev->irq = pdev->irq; | 751 | dev->irq = pdev->irq; |
749 | dev->base_addr = (unsigned long) ioaddr; | 752 | dev->base_addr = (unsigned long) ioaddr; |
750 | 753 | ||
751 | /* dev->priv/lp zeroed and aligned in alloc_etherdev */ | ||
752 | lp = dev->priv; | ||
753 | spin_lock_init(&lp->lock); | 754 | spin_lock_init(&lp->lock); |
754 | lp->pci_dev = pdev; | 755 | lp->pci_dev = pdev; |
755 | lp->boardtype = ent->driver_data; | 756 | lp->boardtype = ent->driver_data; |
@@ -1237,6 +1238,10 @@ tc35815_open(struct net_device *dev) | |||
1237 | return -EAGAIN; | 1238 | return -EAGAIN; |
1238 | } | 1239 | } |
1239 | 1240 | ||
1241 | #ifdef TC35815_NAPI | ||
1242 | napi_enable(&lp->napi); | ||
1243 | #endif | ||
1244 | |||
1240 | /* Reset the hardware here. Don't forget to set the station address. */ | 1245 | /* Reset the hardware here. Don't forget to set the station address. */ |
1241 | spin_lock_irq(&lp->lock); | 1246 | spin_lock_irq(&lp->lock); |
1242 | tc35815_chip_init(dev); | 1247 | tc35815_chip_init(dev); |
@@ -1436,6 +1441,7 @@ static int tc35815_do_interrupt(struct net_device *dev, u32 status) | |||
1436 | static irqreturn_t tc35815_interrupt(int irq, void *dev_id) | 1441 | static irqreturn_t tc35815_interrupt(int irq, void *dev_id) |
1437 | { | 1442 | { |
1438 | struct net_device *dev = dev_id; | 1443 | struct net_device *dev = dev_id; |
1444 | struct tc35815_local *lp = netdev_priv(dev); | ||
1439 | struct tc35815_regs __iomem *tr = | 1445 | struct tc35815_regs __iomem *tr = |
1440 | (struct tc35815_regs __iomem *)dev->base_addr; | 1446 | (struct tc35815_regs __iomem *)dev->base_addr; |
1441 | #ifdef TC35815_NAPI | 1447 | #ifdef TC35815_NAPI |
@@ -1444,8 +1450,8 @@ static irqreturn_t tc35815_interrupt(int irq, void *dev_id) | |||
1444 | if (!(dmactl & DMA_IntMask)) { | 1450 | if (!(dmactl & DMA_IntMask)) { |
1445 | /* disable interrupts */ | 1451 | /* disable interrupts */ |
1446 | tc_writel(dmactl | DMA_IntMask, &tr->DMA_Ctl); | 1452 | tc_writel(dmactl | DMA_IntMask, &tr->DMA_Ctl); |
1447 | if (netif_rx_schedule_prep(dev)) | 1453 | if (netif_rx_schedule_prep(dev, &lp->napi)) |
1448 | __netif_rx_schedule(dev); | 1454 | __netif_rx_schedule(dev, &lp->napi); |
1449 | else { | 1455 | else { |
1450 | printk(KERN_ERR "%s: interrupt taken in poll\n", | 1456 | printk(KERN_ERR "%s: interrupt taken in poll\n", |
1451 | dev->name); | 1457 | dev->name); |
@@ -1726,13 +1732,12 @@ tc35815_rx(struct net_device *dev) | |||
1726 | } | 1732 | } |
1727 | 1733 | ||
1728 | #ifdef TC35815_NAPI | 1734 | #ifdef TC35815_NAPI |
1729 | static int | 1735 | static int tc35815_poll(struct napi_struct *napi, int budget) |
1730 | tc35815_poll(struct net_device *dev, int *budget) | ||
1731 | { | 1736 | { |
1732 | struct tc35815_local *lp = dev->priv; | 1737 | struct tc35815_local *lp = container_of(napi, struct tc35815_local, napi); |
1738 | struct net_device *dev = lp->dev; | ||
1733 | struct tc35815_regs __iomem *tr = | 1739 | struct tc35815_regs __iomem *tr = |
1734 | (struct tc35815_regs __iomem *)dev->base_addr; | 1740 | (struct tc35815_regs __iomem *)dev->base_addr; |
1735 | int limit = min(*budget, dev->quota); | ||
1736 | int received = 0, handled; | 1741 | int received = 0, handled; |
1737 | u32 status; | 1742 | u32 status; |
1738 | 1743 | ||
@@ -1744,23 +1749,19 @@ tc35815_poll(struct net_device *dev, int *budget) | |||
1744 | handled = tc35815_do_interrupt(dev, status, limit); | 1749 | handled = tc35815_do_interrupt(dev, status, budget - received);
1745 | if (handled >= 0) { | 1750 | if (handled >= 0) { |
1746 | received += handled; | 1751 | received += handled; |
1747 | limit -= handled; | 1752 | if (received >= budget) |
1748 | if (limit <= 0) | ||
1749 | break; | 1753 | break; |
1750 | } | 1754 | } |
1751 | status = tc_readl(&tr->Int_Src); | 1755 | status = tc_readl(&tr->Int_Src); |
1752 | } while (status); | 1756 | } while (status); |
1753 | spin_unlock(&lp->lock); | 1757 | spin_unlock(&lp->lock); |
1754 | 1758 | ||
1755 | dev->quota -= received; | 1759 | if (received < budget) { |
1756 | *budget -= received; | 1760 | netif_rx_complete(dev, napi); |
1757 | if (limit <= 0) | 1761 | /* enable interrupts */ |
1758 | return 1; | 1762 | tc_writel(tc_readl(&tr->DMA_Ctl) & ~DMA_IntMask, &tr->DMA_Ctl); |
1759 | 1763 | } | |
1760 | netif_rx_complete(dev); | 1764 | return received; |
1761 | /* enable interrupts */ | ||
1762 | tc_writel(tc_readl(&tr->DMA_Ctl) & ~DMA_IntMask, &tr->DMA_Ctl); | ||
1763 | return 0; | ||
1764 | } | 1765 | } |
1765 | #endif | 1766 | #endif |
1766 | 1767 | ||
@@ -1949,7 +1950,11 @@ static int | |||
1949 | tc35815_close(struct net_device *dev) | 1950 | tc35815_close(struct net_device *dev) |
1950 | { | 1951 | { |
1951 | struct tc35815_local *lp = dev->priv; | 1952 | struct tc35815_local *lp = dev->priv; |
1953 | |||
1952 | netif_stop_queue(dev); | 1954 | netif_stop_queue(dev); |
1955 | #ifdef TC35815_NAPI | ||
1956 | napi_disable(&lp->napi); | ||
1957 | #endif | ||
1953 | 1958 | ||
1954 | /* Flush the Tx and disable Rx here. */ | 1959 | /* Flush the Tx and disable Rx here. */ |
1955 | 1960 | ||
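
tc35815's interrupt path illustrates the two-step scheduling variant: netif_rx_schedule_prep() claims the poll, the driver masks its own interrupt source, and only then is __netif_rx_schedule() issued. A hedged sketch; my_mask_rx_irqs is illustrative, while lp->napi is the member this patch adds:

    static void my_mask_rx_irqs(struct tc35815_local *lp);

    static irqreturn_t my_interrupt(int irq, void *dev_id)
    {
            struct net_device *dev = dev_id;
            struct tc35815_local *lp = netdev_priv(dev);

            if (netif_rx_schedule_prep(dev, &lp->napi)) {
                    my_mask_rx_irqs(lp);    /* the ack happens later, in the poll */
                    __netif_rx_schedule(dev, &lp->napi);
            } else {
                    /* already scheduled; with IRQs masked this indicates a bug */
                    printk(KERN_ERR "%s: interrupt taken in poll\n", dev->name);
            }
            return IRQ_HANDLED;
    }
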
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c index 9034a05734ef..ef1e3d1173c4 100644 --- a/drivers/net/tg3.c +++ b/drivers/net/tg3.c | |||
@@ -574,7 +574,7 @@ static void tg3_restart_ints(struct tg3 *tp) | |||
574 | static inline void tg3_netif_stop(struct tg3 *tp) | 574 | static inline void tg3_netif_stop(struct tg3 *tp) |
575 | { | 575 | { |
576 | tp->dev->trans_start = jiffies; /* prevent tx timeout */ | 576 | tp->dev->trans_start = jiffies; /* prevent tx timeout */ |
577 | netif_poll_disable(tp->dev); | 577 | napi_disable(&tp->napi); |
578 | netif_tx_disable(tp->dev); | 578 | netif_tx_disable(tp->dev); |
579 | } | 579 | } |
580 | 580 | ||
@@ -585,7 +585,7 @@ static inline void tg3_netif_start(struct tg3 *tp) | |||
585 | * so long as all callers are assured to have free tx slots | 585 | * so long as all callers are assured to have free tx slots |
586 | * (such as after tg3_init_hw) | 586 | * (such as after tg3_init_hw) |
587 | */ | 587 | */ |
588 | netif_poll_enable(tp->dev); | 588 | napi_enable(&tp->napi); |
589 | tp->hw_status->status |= SD_STATUS_UPDATED; | 589 | tp->hw_status->status |= SD_STATUS_UPDATED; |
590 | tg3_enable_ints(tp); | 590 | tg3_enable_ints(tp); |
591 | } | 591 | } |
@@ -3471,11 +3471,12 @@ next_pkt_nopost: | |||
3471 | return received; | 3471 | return received; |
3472 | } | 3472 | } |
3473 | 3473 | ||
3474 | static int tg3_poll(struct net_device *netdev, int *budget) | 3474 | static int tg3_poll(struct napi_struct *napi, int budget) |
3475 | { | 3475 | { |
3476 | struct tg3 *tp = netdev_priv(netdev); | 3476 | struct tg3 *tp = container_of(napi, struct tg3, napi); |
3477 | struct net_device *netdev = tp->dev; | ||
3477 | struct tg3_hw_status *sblk = tp->hw_status; | 3478 | struct tg3_hw_status *sblk = tp->hw_status; |
3478 | int done; | 3479 | int work_done = 0; |
3479 | 3480 | ||
3480 | /* handle link change and other phy events */ | 3481 | /* handle link change and other phy events */ |
3481 | if (!(tp->tg3_flags & | 3482 | if (!(tp->tg3_flags & |
@@ -3494,7 +3495,7 @@ static int tg3_poll(struct net_device *netdev, int *budget) | |||
3494 | if (sblk->idx[0].tx_consumer != tp->tx_cons) { | 3495 | if (sblk->idx[0].tx_consumer != tp->tx_cons) { |
3495 | tg3_tx(tp); | 3496 | tg3_tx(tp); |
3496 | if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING)) { | 3497 | if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING)) { |
3497 | netif_rx_complete(netdev); | 3498 | netif_rx_complete(netdev, napi); |
3498 | schedule_work(&tp->reset_task); | 3499 | schedule_work(&tp->reset_task); |
3499 | return 0; | 3500 | return 0; |
3500 | } | 3501 | } |
@@ -3502,20 +3503,10 @@ static int tg3_poll(struct net_device *netdev, int *budget) | |||
3502 | 3503 | ||
3503 | /* run RX thread, within the bounds set by NAPI. | 3504 | /* run RX thread, within the bounds set by NAPI. |
3504 | * All RX "locking" is done by ensuring outside | 3505 | * All RX "locking" is done by ensuring outside |
3505 | * code synchronizes with dev->poll() | 3506 | * code synchronizes with tg3->napi.poll() |
3506 | */ | 3507 | */ |
3507 | if (sblk->idx[0].rx_producer != tp->rx_rcb_ptr) { | 3508 | if (sblk->idx[0].rx_producer != tp->rx_rcb_ptr) |
3508 | int orig_budget = *budget; | 3509 | work_done = tg3_rx(tp, budget); |
3509 | int work_done; | ||
3510 | |||
3511 | if (orig_budget > netdev->quota) | ||
3512 | orig_budget = netdev->quota; | ||
3513 | |||
3514 | work_done = tg3_rx(tp, orig_budget); | ||
3515 | |||
3516 | *budget -= work_done; | ||
3517 | netdev->quota -= work_done; | ||
3518 | } | ||
3519 | 3510 | ||
3520 | if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) { | 3511 | if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) { |
3521 | tp->last_tag = sblk->status_tag; | 3512 | tp->last_tag = sblk->status_tag; |
@@ -3524,13 +3515,12 @@ static int tg3_poll(struct net_device *netdev, int *budget) | |||
3524 | sblk->status &= ~SD_STATUS_UPDATED; | 3515 | sblk->status &= ~SD_STATUS_UPDATED; |
3525 | 3516 | ||
3526 | /* if no more work, tell net stack and NIC we're done */ | 3517 | /* if no more work, tell net stack and NIC we're done */ |
3527 | done = !tg3_has_work(tp); | 3518 | if (!tg3_has_work(tp)) { |
3528 | if (done) { | 3519 | netif_rx_complete(netdev, napi); |
3529 | netif_rx_complete(netdev); | ||
3530 | tg3_restart_ints(tp); | 3520 | tg3_restart_ints(tp); |
3531 | } | 3521 | } |
3532 | 3522 | ||
3533 | return (done ? 0 : 1); | 3523 | return work_done; |
3534 | } | 3524 | } |
3535 | 3525 | ||
3536 | static void tg3_irq_quiesce(struct tg3 *tp) | 3526 | static void tg3_irq_quiesce(struct tg3 *tp) |
@@ -3577,7 +3567,7 @@ static irqreturn_t tg3_msi_1shot(int irq, void *dev_id) | |||
3577 | prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]); | 3567 | prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]); |
3578 | 3568 | ||
3579 | if (likely(!tg3_irq_sync(tp))) | 3569 | if (likely(!tg3_irq_sync(tp))) |
3580 | netif_rx_schedule(dev); /* schedule NAPI poll */ | 3570 | netif_rx_schedule(dev, &tp->napi); |
3581 | 3571 | ||
3582 | return IRQ_HANDLED; | 3572 | return IRQ_HANDLED; |
3583 | } | 3573 | } |
@@ -3602,7 +3592,7 @@ static irqreturn_t tg3_msi(int irq, void *dev_id) | |||
3602 | */ | 3592 | */ |
3603 | tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001); | 3593 | tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001); |
3604 | if (likely(!tg3_irq_sync(tp))) | 3594 | if (likely(!tg3_irq_sync(tp))) |
3605 | netif_rx_schedule(dev); /* schedule NAPI poll */ | 3595 | netif_rx_schedule(dev, &tp->napi); |
3606 | 3596 | ||
3607 | return IRQ_RETVAL(1); | 3597 | return IRQ_RETVAL(1); |
3608 | } | 3598 | } |
@@ -3644,7 +3634,7 @@ static irqreturn_t tg3_interrupt(int irq, void *dev_id) | |||
3644 | sblk->status &= ~SD_STATUS_UPDATED; | 3634 | sblk->status &= ~SD_STATUS_UPDATED; |
3645 | if (likely(tg3_has_work(tp))) { | 3635 | if (likely(tg3_has_work(tp))) { |
3646 | prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]); | 3636 | prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]); |
3647 | netif_rx_schedule(dev); /* schedule NAPI poll */ | 3637 | netif_rx_schedule(dev, &tp->napi); |
3648 | } else { | 3638 | } else { |
3649 | /* No work, shared interrupt perhaps? re-enable | 3639 | /* No work, shared interrupt perhaps? re-enable |
3650 | * interrupts, and flush that PCI write | 3640 | * interrupts, and flush that PCI write |
@@ -3690,7 +3680,7 @@ static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id) | |||
3690 | tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001); | 3680 | tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001); |
3691 | if (tg3_irq_sync(tp)) | 3681 | if (tg3_irq_sync(tp)) |
3692 | goto out; | 3682 | goto out; |
3693 | if (netif_rx_schedule_prep(dev)) { | 3683 | if (netif_rx_schedule_prep(dev, &tp->napi)) { |
3694 | prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]); | 3684 | prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]); |
3695 | /* Update last_tag to mark that this status has been | 3685 | /* Update last_tag to mark that this status has been |
3696 | * seen. Because interrupt may be shared, we may be | 3686 | * seen. Because interrupt may be shared, we may be |
@@ -3698,7 +3688,7 @@ static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id) | |||
3698 | * if tg3_poll() is not scheduled. | 3688 | * if tg3_poll() is not scheduled. |
3699 | */ | 3689 | */ |
3700 | tp->last_tag = sblk->status_tag; | 3690 | tp->last_tag = sblk->status_tag; |
3701 | __netif_rx_schedule(dev); | 3691 | __netif_rx_schedule(dev, &tp->napi); |
3702 | } | 3692 | } |
3703 | out: | 3693 | out: |
3704 | return IRQ_RETVAL(handled); | 3694 | return IRQ_RETVAL(handled); |
@@ -3737,7 +3727,7 @@ static int tg3_restart_hw(struct tg3 *tp, int reset_phy) | |||
3737 | tg3_full_unlock(tp); | 3727 | tg3_full_unlock(tp); |
3738 | del_timer_sync(&tp->timer); | 3728 | del_timer_sync(&tp->timer); |
3739 | tp->irq_sync = 0; | 3729 | tp->irq_sync = 0; |
3740 | netif_poll_enable(tp->dev); | 3730 | napi_enable(&tp->napi); |
3741 | dev_close(tp->dev); | 3731 | dev_close(tp->dev); |
3742 | tg3_full_lock(tp, 0); | 3732 | tg3_full_lock(tp, 0); |
3743 | } | 3733 | } |
@@ -3932,7 +3922,7 @@ static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
3932 | len = skb_headlen(skb); | 3922 | len = skb_headlen(skb); |
3933 | 3923 | ||
3934 | /* We are running in BH disabled context with netif_tx_lock | 3924 | /* We are running in BH disabled context with netif_tx_lock |
3935 | * and TX reclaim runs via tp->poll inside of a software | 3925 | * and TX reclaim runs via tp->napi.poll inside of a software |
3936 | * interrupt. Furthermore, IRQ processing runs lockless so we have | 3926 | * interrupt. Furthermore, IRQ processing runs lockless so we have |
3937 | * no IRQ context deadlocks to worry about either. Rejoice! | 3927 | * no IRQ context deadlocks to worry about either. Rejoice! |
3938 | */ | 3928 | */ |
@@ -4087,7 +4077,7 @@ static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev) | |||
4087 | len = skb_headlen(skb); | 4077 | len = skb_headlen(skb); |
4088 | 4078 | ||
4089 | /* We are running in BH disabled context with netif_tx_lock | 4079 | /* We are running in BH disabled context with netif_tx_lock |
4090 | * and TX reclaim runs via tp->poll inside of a software | 4080 | * and TX reclaim runs via tp->napi.poll inside of a software |
4091 | * interrupt. Furthermore, IRQ processing runs lockless so we have | 4081 | * interrupt. Furthermore, IRQ processing runs lockless so we have |
4092 | * no IRQ context deadlocks to worry about either. Rejoice! | 4082 | * no IRQ context deadlocks to worry about either. Rejoice! |
4093 | */ | 4083 | */ |
@@ -7147,6 +7137,8 @@ static int tg3_open(struct net_device *dev) | |||
7147 | return err; | 7137 | return err; |
7148 | } | 7138 | } |
7149 | 7139 | ||
7140 | napi_enable(&tp->napi); | ||
7141 | |||
7150 | tg3_full_lock(tp, 0); | 7142 | tg3_full_lock(tp, 0); |
7151 | 7143 | ||
7152 | err = tg3_init_hw(tp, 1); | 7144 | err = tg3_init_hw(tp, 1); |
@@ -7174,6 +7166,7 @@ static int tg3_open(struct net_device *dev) | |||
7174 | tg3_full_unlock(tp); | 7166 | tg3_full_unlock(tp); |
7175 | 7167 | ||
7176 | if (err) { | 7168 | if (err) { |
7169 | napi_disable(&tp->napi); | ||
7177 | free_irq(tp->pdev->irq, dev); | 7170 | free_irq(tp->pdev->irq, dev); |
7178 | if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) { | 7171 | if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) { |
7179 | pci_disable_msi(tp->pdev); | 7172 | pci_disable_msi(tp->pdev); |
@@ -7199,6 +7192,8 @@ static int tg3_open(struct net_device *dev) | |||
7199 | 7192 | ||
7200 | tg3_full_unlock(tp); | 7193 | tg3_full_unlock(tp); |
7201 | 7194 | ||
7195 | napi_disable(&tp->napi); | ||
7196 | |||
7202 | return err; | 7197 | return err; |
7203 | } | 7198 | } |
7204 | 7199 | ||
@@ -7460,6 +7455,7 @@ static int tg3_close(struct net_device *dev) | |||
7460 | { | 7455 | { |
7461 | struct tg3 *tp = netdev_priv(dev); | 7456 | struct tg3 *tp = netdev_priv(dev); |
7462 | 7457 | ||
7458 | napi_disable(&tp->napi); | ||
7463 | cancel_work_sync(&tp->reset_task); | 7459 | cancel_work_sync(&tp->reset_task); |
7464 | 7460 | ||
7465 | netif_stop_queue(dev); | 7461 | netif_stop_queue(dev); |
@@ -11900,9 +11896,8 @@ static int __devinit tg3_init_one(struct pci_dev *pdev, | |||
11900 | dev->set_mac_address = tg3_set_mac_addr; | 11896 | dev->set_mac_address = tg3_set_mac_addr; |
11901 | dev->do_ioctl = tg3_ioctl; | 11897 | dev->do_ioctl = tg3_ioctl; |
11902 | dev->tx_timeout = tg3_tx_timeout; | 11898 | dev->tx_timeout = tg3_tx_timeout; |
11903 | dev->poll = tg3_poll; | 11899 | netif_napi_add(dev, &tp->napi, tg3_poll, 64); |
11904 | dev->ethtool_ops = &tg3_ethtool_ops; | 11900 | dev->ethtool_ops = &tg3_ethtool_ops; |
11905 | dev->weight = 64; | ||
11906 | dev->watchdog_timeo = TG3_TX_TIMEOUT; | 11901 | dev->watchdog_timeo = TG3_TX_TIMEOUT; |
11907 | dev->change_mtu = tg3_change_mtu; | 11902 | dev->change_mtu = tg3_change_mtu; |
11908 | dev->irq = pdev->irq; | 11903 | dev->irq = pdev->irq; |
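
tg3 makes the change in return semantics easiest to see. The old dev->poll() mutated *budget and dev->quota itself and answered only "done or not"; the new napi poll simply reports work done and lets the core compare it against the budget. A side-by-side sketch, with my_rx, my_has_work and my_restart_ints standing in for the tg3 helpers:

    /* old interface, removed by this series */
    static int old_poll(struct net_device *dev, int *budget)
    {
            int work = my_rx(dev, min(*budget, dev->quota));

            *budget -= work;
            dev->quota -= work;
            if (my_has_work(dev))
                    return 1;               /* 1 means "poll me again" */
            netif_rx_complete(dev);         /* old one-argument form */
            return 0;
    }

    /* new interface: the core does the budget accounting */
    static int new_poll(struct napi_struct *napi, int budget)
    {
            struct tg3 *tp = container_of(napi, struct tg3, napi);
            int work = my_rx(tp->dev, budget);

            if (!my_has_work(tp->dev)) {
                    netif_rx_complete(tp->dev, napi);
                    my_restart_ints(tp);
            }
            return work;
    }
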
diff --git a/drivers/net/tg3.h b/drivers/net/tg3.h index 5c21f49026c9..a6a23bbcdfee 100644 --- a/drivers/net/tg3.h +++ b/drivers/net/tg3.h | |||
@@ -2176,6 +2176,7 @@ struct tg3 { | |||
2176 | dma_addr_t tx_desc_mapping; | 2176 | dma_addr_t tx_desc_mapping; |
2177 | 2177 | ||
2178 | /* begin "rx thread" cacheline section */ | 2178 | /* begin "rx thread" cacheline section */ |
2179 | struct napi_struct napi; | ||
2179 | void (*write32_rx_mbox) (struct tg3 *, u32, | 2180 | void (*write32_rx_mbox) (struct tg3 *, u32, |
2180 | u32); | 2181 | u32); |
2181 | u32 rx_rcb_ptr; | 2182 | u32 rx_rcb_ptr; |
diff --git a/drivers/net/tsi108_eth.c b/drivers/net/tsi108_eth.c index 1aabc91f6458..b3069ee34bd2 100644 --- a/drivers/net/tsi108_eth.c +++ b/drivers/net/tsi108_eth.c | |||
@@ -79,6 +79,9 @@ struct tsi108_prv_data { | |||
79 | void __iomem *regs; /* Base of normal regs */ | 79 | void __iomem *regs; /* Base of normal regs */ |
80 | void __iomem *phyregs; /* Base of register bank used for PHY access */ | 80 | void __iomem *phyregs; /* Base of register bank used for PHY access */ |
81 | 81 | ||
82 | struct net_device *dev; | ||
83 | struct napi_struct napi; | ||
84 | |||
82 | unsigned int phy; /* Index of PHY for this interface */ | 85 | unsigned int phy; /* Index of PHY for this interface */ |
83 | unsigned int irq_num; | 86 | unsigned int irq_num; |
84 | unsigned int id; | 87 | unsigned int id; |
@@ -837,13 +840,13 @@ static int tsi108_refill_rx(struct net_device *dev, int budget) | |||
837 | return done; | 840 | return done; |
838 | } | 841 | } |
839 | 842 | ||
840 | static int tsi108_poll(struct net_device *dev, int *budget) | 843 | static int tsi108_poll(struct napi_struct *napi, int budget) |
841 | { | 844 | { |
842 | struct tsi108_prv_data *data = netdev_priv(dev); | 845 | struct tsi108_prv_data *data = container_of(napi, struct tsi108_prv_data, napi); |
846 | struct net_device *dev = data->dev; | ||
843 | u32 estat = TSI_READ(TSI108_EC_RXESTAT); | 847 | u32 estat = TSI_READ(TSI108_EC_RXESTAT); |
844 | u32 intstat = TSI_READ(TSI108_EC_INTSTAT); | 848 | u32 intstat = TSI_READ(TSI108_EC_INTSTAT); |
845 | int total_budget = min(*budget, dev->quota); | 849 | int num_received = 0, num_filled = 0; |
846 | int num_received = 0, num_filled = 0, budget_used; | ||
847 | 850 | ||
848 | intstat &= TSI108_INT_RXQUEUE0 | TSI108_INT_RXTHRESH | | 851 | intstat &= TSI108_INT_RXQUEUE0 | TSI108_INT_RXTHRESH | |
849 | TSI108_INT_RXOVERRUN | TSI108_INT_RXERROR | TSI108_INT_RXWAIT; | 852 | TSI108_INT_RXOVERRUN | TSI108_INT_RXERROR | TSI108_INT_RXWAIT; |
@@ -852,7 +855,7 @@ static int tsi108_poll(struct net_device *dev, int *budget) | |||
852 | TSI_WRITE(TSI108_EC_INTSTAT, intstat); | 855 | TSI_WRITE(TSI108_EC_INTSTAT, intstat); |
853 | 856 | ||
854 | if (data->rxpending || (estat & TSI108_EC_RXESTAT_Q0_DESCINT)) | 857 | if (data->rxpending || (estat & TSI108_EC_RXESTAT_Q0_DESCINT)) |
855 | num_received = tsi108_complete_rx(dev, total_budget); | 858 | num_received = tsi108_complete_rx(dev, budget); |
856 | 859 | ||
857 | /* This should normally fill no more slots than the number of | 860 | /* This should normally fill no more slots than the number of |
858 | * packets received in tsi108_complete_rx(). The exception | 861 | * packets received in tsi108_complete_rx(). The exception |
@@ -867,7 +870,7 @@ static int tsi108_poll(struct net_device *dev, int *budget) | |||
867 | */ | 870 | */ |
868 | 871 | ||
869 | if (data->rxfree < TSI108_RXRING_LEN) | 872 | if (data->rxfree < TSI108_RXRING_LEN) |
870 | num_filled = tsi108_refill_rx(dev, total_budget * 2); | 873 | num_filled = tsi108_refill_rx(dev, budget * 2); |
871 | 874 | ||
872 | if (intstat & TSI108_INT_RXERROR) { | 875 | if (intstat & TSI108_INT_RXERROR) { |
873 | u32 err = TSI_READ(TSI108_EC_RXERR); | 876 | u32 err = TSI_READ(TSI108_EC_RXERR); |
@@ -890,14 +893,9 @@ static int tsi108_poll(struct net_device *dev, int *budget) | |||
890 | spin_unlock_irq(&data->misclock); | 893 | spin_unlock_irq(&data->misclock); |
891 | } | 894 | } |
892 | 895 | ||
893 | budget_used = max(num_received, num_filled / 2); | 896 | if (num_received < budget) { |
894 | |||
895 | *budget -= budget_used; | ||
896 | dev->quota -= budget_used; | ||
897 | |||
898 | if (budget_used != total_budget) { | ||
899 | data->rxpending = 0; | 897 | data->rxpending = 0; |
900 | netif_rx_complete(dev); | 898 | netif_rx_complete(dev, napi); |
901 | 899 | ||
902 | TSI_WRITE(TSI108_EC_INTMASK, | 900 | TSI_WRITE(TSI108_EC_INTMASK, |
903 | TSI_READ(TSI108_EC_INTMASK) | 901 | TSI_READ(TSI108_EC_INTMASK) |
@@ -906,14 +904,11 @@ static int tsi108_poll(struct net_device *dev, int *budget) | |||
906 | TSI108_INT_RXOVERRUN | | 904 | TSI108_INT_RXOVERRUN | |
907 | TSI108_INT_RXERROR | | 905 | TSI108_INT_RXERROR | |
908 | TSI108_INT_RXWAIT)); | 906 | TSI108_INT_RXWAIT)); |
909 | |||
910 | /* IRQs are level-triggered, so no need to re-check */ | ||
911 | return 0; | ||
912 | } else { | 907 | } else { |
913 | data->rxpending = 1; | 908 | data->rxpending = 1; |
914 | } | 909 | } |
915 | 910 | ||
916 | return 1; | 911 | return num_received; |
917 | } | 912 | } |
918 | 913 | ||
919 | static void tsi108_rx_int(struct net_device *dev) | 914 | static void tsi108_rx_int(struct net_device *dev) |
@@ -931,7 +926,7 @@ static void tsi108_rx_int(struct net_device *dev) | |||
931 | * from tsi108_check_rxring(). | 926 | * from tsi108_check_rxring(). |
932 | */ | 927 | */ |
933 | 928 | ||
934 | if (netif_rx_schedule_prep(dev)) { | 929 | if (netif_rx_schedule_prep(dev, &data->napi)) { |
935 | /* Mask, rather than ack, the receive interrupts. The ack | 930 | /* Mask, rather than ack, the receive interrupts. The ack |
936 | * will happen in tsi108_poll(). | 931 | * will happen in tsi108_poll(). |
937 | */ | 932 | */ |
@@ -942,7 +937,7 @@ static void tsi108_rx_int(struct net_device *dev) | |||
942 | | TSI108_INT_RXTHRESH | | 937 | | TSI108_INT_RXTHRESH | |
943 | TSI108_INT_RXOVERRUN | TSI108_INT_RXERROR | | 938 | TSI108_INT_RXOVERRUN | TSI108_INT_RXERROR | |
944 | TSI108_INT_RXWAIT); | 939 | TSI108_INT_RXWAIT); |
945 | __netif_rx_schedule(dev); | 940 | __netif_rx_schedule(dev, &data->napi); |
946 | } else { | 941 | } else { |
947 | if (!netif_running(dev)) { | 942 | if (!netif_running(dev)) { |
948 | /* This can happen if an interrupt occurs while the | 943 | /* This can happen if an interrupt occurs while the |
@@ -1401,6 +1396,8 @@ static int tsi108_open(struct net_device *dev) | |||
1401 | TSI_WRITE(TSI108_EC_TXQ_PTRLOW, data->txdma); | 1396 | TSI_WRITE(TSI108_EC_TXQ_PTRLOW, data->txdma); |
1402 | tsi108_init_phy(dev); | 1397 | tsi108_init_phy(dev); |
1403 | 1398 | ||
1399 | napi_enable(&data->napi); | ||
1400 | |||
1404 | setup_timer(&data->timer, tsi108_timed_checker, (unsigned long)dev); | 1401 | setup_timer(&data->timer, tsi108_timed_checker, (unsigned long)dev); |
1405 | mod_timer(&data->timer, jiffies + 1); | 1402 | mod_timer(&data->timer, jiffies + 1); |
1406 | 1403 | ||
@@ -1425,6 +1422,7 @@ static int tsi108_close(struct net_device *dev) | |||
1425 | struct tsi108_prv_data *data = netdev_priv(dev); | 1422 | struct tsi108_prv_data *data = netdev_priv(dev); |
1426 | 1423 | ||
1427 | netif_stop_queue(dev); | 1424 | netif_stop_queue(dev); |
1425 | napi_disable(&data->napi); | ||
1428 | 1426 | ||
1429 | del_timer_sync(&data->timer); | 1427 | del_timer_sync(&data->timer); |
1430 | 1428 | ||
@@ -1562,6 +1560,7 @@ tsi108_init_one(struct platform_device *pdev) | |||
1562 | 1560 | ||
1563 | printk("tsi108_eth%d: probe...\n", pdev->id); | 1561 | printk("tsi108_eth%d: probe...\n", pdev->id); |
1564 | data = netdev_priv(dev); | 1562 | data = netdev_priv(dev); |
1563 | data->dev = dev; | ||
1565 | 1564 | ||
1566 | pr_debug("tsi108_eth%d:regs:phyresgs:phy:irq_num=0x%x:0x%x:0x%x:0x%x\n", | 1565 | pr_debug("tsi108_eth%d:regs:phyresgs:phy:irq_num=0x%x:0x%x:0x%x:0x%x\n", |
1567 | pdev->id, einfo->regs, einfo->phyregs, | 1566 | pdev->id, einfo->regs, einfo->phyregs, |
@@ -1597,9 +1596,8 @@ tsi108_init_one(struct platform_device *pdev) | |||
1597 | dev->set_mac_address = tsi108_set_mac; | 1596 | dev->set_mac_address = tsi108_set_mac; |
1598 | dev->set_multicast_list = tsi108_set_rx_mode; | 1597 | dev->set_multicast_list = tsi108_set_rx_mode; |
1599 | dev->get_stats = tsi108_get_stats; | 1598 | dev->get_stats = tsi108_get_stats; |
1600 | dev->poll = tsi108_poll; | 1599 | netif_napi_add(dev, &data->napi, tsi108_poll, 64); |
1601 | dev->do_ioctl = tsi108_do_ioctl; | 1600 | dev->do_ioctl = tsi108_do_ioctl; |
1602 | dev->weight = 64; /* 64 is more suitable for GigE interface - klai */ | ||
1603 | 1601 | ||
1604 | /* Apparently, the Linux networking code won't use scatter-gather | 1602 | /* Apparently, the Linux networking code won't use scatter-gather |
1605 | * if the hardware doesn't do checksums. However, it's faster | 1603 | * if the hardware doesn't do checksums. However, it's faster |
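
The tsi108 hunks above show the conversion pattern this series repeats in
every driver: the poll callback moves from
int (*poll)(struct net_device *, int *budget) to
int (*poll)(struct napi_struct *, int budget), and instead of decrementing
*budget and dev->quota it simply returns the number of packets it
processed. A minimal sketch of a converted handler (the "foo" names are
hypothetical, not part of this patch):

	struct foo_private {
		struct napi_struct napi;
		struct net_device *dev;
		/* ... rings, locks, register mappings ... */
	};

	static int foo_poll(struct napi_struct *napi, int budget)
	{
		/* The napi_struct is embedded in the private data, so
		 * container_of() recovers the driver state. */
		struct foo_private *fp = container_of(napi, struct foo_private, napi);
		struct net_device *dev = fp->dev;
		int work_done;

		/* foo_rx() is assumed to process at most budget packets
		 * and return how many it actually handled. */
		work_done = foo_rx(dev, budget);

		if (work_done < budget) {
			/* Ring drained: leave the poll list, then
			 * re-enable the device's RX interrupts. */
			netif_rx_complete(dev, napi);
			foo_enable_rx_irq(fp);
		}
		return work_done;
	}
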
diff --git a/drivers/net/tulip/interrupt.c b/drivers/net/tulip/interrupt.c index 53efd6694e75..365331446387 100644 --- a/drivers/net/tulip/interrupt.c +++ b/drivers/net/tulip/interrupt.c | |||
@@ -103,28 +103,29 @@ int tulip_refill_rx(struct net_device *dev) | |||
103 | void oom_timer(unsigned long data) | 103 | void oom_timer(unsigned long data) |
104 | { | 104 | { |
105 | struct net_device *dev = (struct net_device *)data; | 105 | struct net_device *dev = (struct net_device *)data; |
106 | netif_rx_schedule(dev); | 106 | struct tulip_private *tp = netdev_priv(dev); |
107 | netif_rx_schedule(dev, &tp->napi); | ||
107 | } | 108 | } |
108 | 109 | ||
109 | int tulip_poll(struct net_device *dev, int *budget) | 110 | int tulip_poll(struct napi_struct *napi, int budget) |
110 | { | 111 | { |
111 | struct tulip_private *tp = netdev_priv(dev); | 112 | struct tulip_private *tp = container_of(napi, struct tulip_private, napi); |
113 | struct net_device *dev = tp->dev; | ||
112 | int entry = tp->cur_rx % RX_RING_SIZE; | 114 | int entry = tp->cur_rx % RX_RING_SIZE; |
113 | int rx_work_limit = *budget; | 115 | int work_done = 0; |
116 | #ifdef CONFIG_TULIP_NAPI_HW_MITIGATION | ||
114 | int received = 0; | 117 | int received = 0; |
118 | #endif | ||
115 | 119 | ||
116 | if (!netif_running(dev)) | 120 | if (!netif_running(dev)) |
117 | goto done; | 121 | goto done; |
118 | 122 | ||
119 | if (rx_work_limit > dev->quota) | ||
120 | rx_work_limit = dev->quota; | ||
121 | |||
122 | #ifdef CONFIG_TULIP_NAPI_HW_MITIGATION | 123 | #ifdef CONFIG_TULIP_NAPI_HW_MITIGATION |
123 | 124 | ||
124 | /* that one buffer is needed for mit activation; or might be a | 125 | /* that one buffer is needed for mit activation; or might be a |
125 | bug in the ring buffer code; check later -- JHS*/ | 126 | bug in the ring buffer code; check later -- JHS*/ |
126 | 127 | ||
127 | if (rx_work_limit >=RX_RING_SIZE) rx_work_limit--; | 128 | if (budget >=RX_RING_SIZE) budget--; |
128 | #endif | 129 | #endif |
129 | 130 | ||
130 | if (tulip_debug > 4) | 131 | if (tulip_debug > 4) |
@@ -144,14 +145,13 @@ int tulip_poll(struct net_device *dev, int *budget) | |||
144 | while ( ! (tp->rx_ring[entry].status & cpu_to_le32(DescOwned))) { | 145 | while ( ! (tp->rx_ring[entry].status & cpu_to_le32(DescOwned))) { |
145 | s32 status = le32_to_cpu(tp->rx_ring[entry].status); | 146 | s32 status = le32_to_cpu(tp->rx_ring[entry].status); |
146 | 147 | ||
147 | |||
148 | if (tp->dirty_rx + RX_RING_SIZE == tp->cur_rx) | 148 | if (tp->dirty_rx + RX_RING_SIZE == tp->cur_rx) |
149 | break; | 149 | break; |
150 | 150 | ||
151 | if (tulip_debug > 5) | 151 | if (tulip_debug > 5) |
152 | printk(KERN_DEBUG "%s: In tulip_rx(), entry %d %8.8x.\n", | 152 | printk(KERN_DEBUG "%s: In tulip_rx(), entry %d %8.8x.\n", |
153 | dev->name, entry, status); | 153 | dev->name, entry, status); |
154 | if (--rx_work_limit < 0) | 154 | if (work_done++ >= budget) |
155 | goto not_done; | 155 | goto not_done; |
156 | 156 | ||
157 | if ((status & 0x38008300) != 0x0300) { | 157 | if ((status & 0x38008300) != 0x0300) { |
@@ -238,7 +238,9 @@ int tulip_poll(struct net_device *dev, int *budget) | |||
238 | tp->stats.rx_packets++; | 238 | tp->stats.rx_packets++; |
239 | tp->stats.rx_bytes += pkt_len; | 239 | tp->stats.rx_bytes += pkt_len; |
240 | } | 240 | } |
241 | received++; | 241 | #ifdef CONFIG_TULIP_NAPI_HW_MITIGATION |
242 | received++; | ||
243 | #endif | ||
242 | 244 | ||
243 | entry = (++tp->cur_rx) % RX_RING_SIZE; | 245 | entry = (++tp->cur_rx) % RX_RING_SIZE; |
244 | if (tp->cur_rx - tp->dirty_rx > RX_RING_SIZE/4) | 246 | if (tp->cur_rx - tp->dirty_rx > RX_RING_SIZE/4) |
@@ -296,17 +298,15 @@ done: | |||
296 | 298 | ||
297 | #endif /* CONFIG_TULIP_NAPI_HW_MITIGATION */ | 299 | #endif /* CONFIG_TULIP_NAPI_HW_MITIGATION */ |
298 | 300 | ||
299 | dev->quota -= received; | ||
300 | *budget -= received; | ||
301 | |||
302 | tulip_refill_rx(dev); | 301 | tulip_refill_rx(dev); |
303 | 302 | ||
304 | /* If RX ring is not full we are out of memory. */ | 303 | /* If RX ring is not full we are out of memory. */ |
305 | if (tp->rx_buffers[tp->dirty_rx % RX_RING_SIZE].skb == NULL) goto oom; | 304 | if (tp->rx_buffers[tp->dirty_rx % RX_RING_SIZE].skb == NULL) |
305 | goto oom; | ||
306 | 306 | ||
307 | /* Remove us from polling list and enable RX intr. */ | 307 | /* Remove us from polling list and enable RX intr. */ |
308 | 308 | ||
309 | netif_rx_complete(dev); | 309 | netif_rx_complete(dev, napi); |
310 | iowrite32(tulip_tbl[tp->chip_id].valid_intrs, tp->base_addr+CSR7); | 310 | iowrite32(tulip_tbl[tp->chip_id].valid_intrs, tp->base_addr+CSR7); |
311 | 311 | ||
312 | /* The last op happens after poll completion. Which means the following: | 312 | /* The last op happens after poll completion. Which means the following: |
@@ -320,28 +320,20 @@ done: | |||
320 | * processed irqs. But it must not result in losing events. | 320 | * processed irqs. But it must not result in losing events. |
321 | */ | 321 | */ |
322 | 322 | ||
323 | return 0; | 323 | return work_done; |
324 | 324 | ||
325 | not_done: | 325 | not_done: |
326 | if (!received) { | ||
327 | |||
328 | received = dev->quota; /* Not to happen */ | ||
329 | } | ||
330 | dev->quota -= received; | ||
331 | *budget -= received; | ||
332 | |||
333 | if (tp->cur_rx - tp->dirty_rx > RX_RING_SIZE/2 || | 326 | if (tp->cur_rx - tp->dirty_rx > RX_RING_SIZE/2 || |
334 | tp->rx_buffers[tp->dirty_rx % RX_RING_SIZE].skb == NULL) | 327 | tp->rx_buffers[tp->dirty_rx % RX_RING_SIZE].skb == NULL) |
335 | tulip_refill_rx(dev); | 328 | tulip_refill_rx(dev); |
336 | 329 | ||
337 | if (tp->rx_buffers[tp->dirty_rx % RX_RING_SIZE].skb == NULL) goto oom; | 330 | if (tp->rx_buffers[tp->dirty_rx % RX_RING_SIZE].skb == NULL) |
338 | 331 | goto oom; | |
339 | return 1; | ||
340 | 332 | ||
333 | return work_done; | ||
341 | 334 | ||
342 | oom: /* Executed with RX ints disabled */ | 335 | oom: /* Executed with RX ints disabled */ |
343 | 336 | ||
344 | |||
345 | /* Start timer, stop polling, but do not enable rx interrupts. */ | 337 | /* Start timer, stop polling, but do not enable rx interrupts. */ |
346 | mod_timer(&tp->oom_timer, jiffies+1); | 338 | mod_timer(&tp->oom_timer, jiffies+1); |
347 | 339 | ||
@@ -350,9 +342,9 @@ done: | |||
350 | * before we did netif_rx_complete(). See? We would lose it. */ | 342 | * before we did netif_rx_complete(). See? We would lose it. */ |
351 | 343 | ||
352 | /* remove ourselves from the polling list */ | 344 | /* remove ourselves from the polling list */ |
353 | netif_rx_complete(dev); | 345 | netif_rx_complete(dev, napi); |
354 | 346 | ||
355 | return 0; | 347 | return work_done; |
356 | } | 348 | } |
357 | 349 | ||
358 | #else /* CONFIG_TULIP_NAPI */ | 350 | #else /* CONFIG_TULIP_NAPI */ |
@@ -534,7 +526,7 @@ irqreturn_t tulip_interrupt(int irq, void *dev_instance) | |||
534 | rxd++; | 526 | rxd++; |
535 | /* Mask RX intrs and add the device to poll list. */ | 527 | /* Mask RX intrs and add the device to poll list. */ |
536 | iowrite32(tulip_tbl[tp->chip_id].valid_intrs&~RxPollInt, ioaddr + CSR7); | 528 | iowrite32(tulip_tbl[tp->chip_id].valid_intrs&~RxPollInt, ioaddr + CSR7); |
537 | netif_rx_schedule(dev); | 529 | netif_rx_schedule(dev, &tp->napi); |
538 | 530 | ||
539 | if (!(csr5&~(AbnormalIntr|NormalIntr|RxPollInt|TPLnkPass))) | 531 | if (!(csr5&~(AbnormalIntr|NormalIntr|RxPollInt|TPLnkPass))) |
540 | break; | 532 | break; |
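
The interrupt-side half of the same pattern, as tulip does it: mask the RX
interrupt sources, then hand the napi context to the softirq. A sketch
under the same hypothetical "foo" naming:

	static irqreturn_t foo_interrupt(int irq, void *dev_instance)
	{
		struct net_device *dev = dev_instance;
		struct foo_private *fp = netdev_priv(dev);

		if (netif_rx_schedule_prep(dev, &fp->napi)) {
			/* Mask RX interrupts; foo_poll() unmasks them
			 * again once it calls netif_rx_complete(). */
			foo_mask_rx_irq(fp);
			__netif_rx_schedule(dev, &fp->napi);
		}
		return IRQ_HANDLED;
	}

netif_rx_schedule_prep() atomically tests and sets NAPI_STATE_SCHED, so a
second interrupt arriving while the poll is pending or running falls
through without scheduling it twice.
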
diff --git a/drivers/net/tulip/tulip.h b/drivers/net/tulip/tulip.h index 16f26a8364f0..5a4d7270973e 100644 --- a/drivers/net/tulip/tulip.h +++ b/drivers/net/tulip/tulip.h | |||
@@ -353,6 +353,7 @@ struct tulip_private { | |||
353 | int chip_id; | 353 | int chip_id; |
354 | int revision; | 354 | int revision; |
355 | int flags; | 355 | int flags; |
356 | struct napi_struct napi; | ||
356 | struct net_device_stats stats; | 357 | struct net_device_stats stats; |
357 | struct timer_list timer; /* Media selection timer. */ | 358 | struct timer_list timer; /* Media selection timer. */ |
358 | struct timer_list oom_timer; /* Out of memory timer. */ | 359 | struct timer_list oom_timer; /* Out of memory timer. */ |
@@ -429,7 +430,7 @@ extern int tulip_rx_copybreak; | |||
429 | irqreturn_t tulip_interrupt(int irq, void *dev_instance); | 430 | irqreturn_t tulip_interrupt(int irq, void *dev_instance); |
430 | int tulip_refill_rx(struct net_device *dev); | 431 | int tulip_refill_rx(struct net_device *dev); |
431 | #ifdef CONFIG_TULIP_NAPI | 432 | #ifdef CONFIG_TULIP_NAPI |
432 | int tulip_poll(struct net_device *dev, int *budget); | 433 | int tulip_poll(struct napi_struct *napi, int budget); |
433 | #endif | 434 | #endif |
434 | 435 | ||
435 | 436 | ||
diff --git a/drivers/net/tulip/tulip_core.c b/drivers/net/tulip/tulip_core.c index eca984f89bbf..7040a59fa3c9 100644 --- a/drivers/net/tulip/tulip_core.c +++ b/drivers/net/tulip/tulip_core.c | |||
@@ -294,6 +294,10 @@ static void tulip_up(struct net_device *dev) | |||
294 | int next_tick = 3*HZ; | 294 | int next_tick = 3*HZ; |
295 | int i; | 295 | int i; |
296 | 296 | ||
297 | #ifdef CONFIG_TULIP_NAPI | ||
298 | napi_enable(&tp->napi); | ||
299 | #endif | ||
300 | |||
297 | /* Wake the chip from sleep/snooze mode. */ | 301 | /* Wake the chip from sleep/snooze mode. */ |
298 | tulip_set_power_state (tp, 0, 0); | 302 | tulip_set_power_state (tp, 0, 0); |
299 | 303 | ||
@@ -728,6 +732,10 @@ static void tulip_down (struct net_device *dev) | |||
728 | 732 | ||
729 | flush_scheduled_work(); | 733 | flush_scheduled_work(); |
730 | 734 | ||
735 | #ifdef CONFIG_TULIP_NAPI | ||
736 | napi_disable(&tp->napi); | ||
737 | #endif | ||
738 | |||
731 | del_timer_sync (&tp->timer); | 739 | del_timer_sync (&tp->timer); |
732 | #ifdef CONFIG_TULIP_NAPI | 740 | #ifdef CONFIG_TULIP_NAPI |
733 | del_timer_sync (&tp->oom_timer); | 741 | del_timer_sync (&tp->oom_timer); |
@@ -1606,8 +1614,7 @@ static int __devinit tulip_init_one (struct pci_dev *pdev, | |||
1606 | dev->tx_timeout = tulip_tx_timeout; | 1614 | dev->tx_timeout = tulip_tx_timeout; |
1607 | dev->watchdog_timeo = TX_TIMEOUT; | 1615 | dev->watchdog_timeo = TX_TIMEOUT; |
1608 | #ifdef CONFIG_TULIP_NAPI | 1616 | #ifdef CONFIG_TULIP_NAPI |
1609 | dev->poll = tulip_poll; | 1617 | netif_napi_add(dev, &tp->napi, tulip_poll, 16); |
1610 | dev->weight = 16; | ||
1611 | #endif | 1618 | #endif |
1612 | dev->stop = tulip_close; | 1619 | dev->stop = tulip_close; |
1613 | dev->get_stats = tulip_get_stats; | 1620 | dev->get_stats = tulip_get_stats; |
diff --git a/drivers/net/typhoon.c b/drivers/net/typhoon.c index 03587205546e..0377b8b64c78 100644 --- a/drivers/net/typhoon.c +++ b/drivers/net/typhoon.c | |||
@@ -284,6 +284,7 @@ struct typhoon { | |||
284 | struct basic_ring rxLoRing; | 284 | struct basic_ring rxLoRing; |
285 | struct pci_dev * pdev; | 285 | struct pci_dev * pdev; |
286 | struct net_device * dev; | 286 | struct net_device * dev; |
287 | struct napi_struct napi; | ||
287 | spinlock_t state_lock; | 288 | spinlock_t state_lock; |
288 | struct vlan_group * vlgrp; | 289 | struct vlan_group * vlgrp; |
289 | struct basic_ring rxHiRing; | 290 | struct basic_ring rxHiRing; |
@@ -1759,12 +1760,12 @@ typhoon_fill_free_ring(struct typhoon *tp) | |||
1759 | } | 1760 | } |
1760 | 1761 | ||
1761 | static int | 1762 | static int |
1762 | typhoon_poll(struct net_device *dev, int *total_budget) | 1763 | typhoon_poll(struct napi_struct *napi, int budget) |
1763 | { | 1764 | { |
1764 | struct typhoon *tp = netdev_priv(dev); | 1765 | struct typhoon *tp = container_of(napi, struct typhoon, napi); |
1766 | struct net_device *dev = tp->dev; | ||
1765 | struct typhoon_indexes *indexes = tp->indexes; | 1767 | struct typhoon_indexes *indexes = tp->indexes; |
1766 | int orig_budget = *total_budget; | 1768 | int work_done; |
1767 | int budget, work_done, done; | ||
1768 | 1769 | ||
1769 | rmb(); | 1770 | rmb(); |
1770 | if(!tp->awaiting_resp && indexes->respReady != indexes->respCleared) | 1771 | if(!tp->awaiting_resp && indexes->respReady != indexes->respCleared) |
@@ -1773,30 +1774,16 @@ typhoon_poll(struct net_device *dev, int *total_budget) | |||
1773 | if(le32_to_cpu(indexes->txLoCleared) != tp->txLoRing.lastRead) | 1774 | if(le32_to_cpu(indexes->txLoCleared) != tp->txLoRing.lastRead) |
1774 | typhoon_tx_complete(tp, &tp->txLoRing, &indexes->txLoCleared); | 1775 | typhoon_tx_complete(tp, &tp->txLoRing, &indexes->txLoCleared); |
1775 | 1776 | ||
1776 | if(orig_budget > dev->quota) | ||
1777 | orig_budget = dev->quota; | ||
1778 | |||
1779 | budget = orig_budget; | ||
1780 | work_done = 0; | 1777 | work_done = 0; |
1781 | done = 1; | ||
1782 | 1778 | ||
1783 | if(indexes->rxHiCleared != indexes->rxHiReady) { | 1779 | if(indexes->rxHiCleared != indexes->rxHiReady) { |
1784 | work_done = typhoon_rx(tp, &tp->rxHiRing, &indexes->rxHiReady, | 1780 | work_done += typhoon_rx(tp, &tp->rxHiRing, &indexes->rxHiReady, |
1785 | &indexes->rxHiCleared, budget); | 1781 | &indexes->rxHiCleared, budget); |
1786 | budget -= work_done; | ||
1787 | } | 1782 | } |
1788 | 1783 | ||
1789 | if(indexes->rxLoCleared != indexes->rxLoReady) { | 1784 | if(indexes->rxLoCleared != indexes->rxLoReady) { |
1790 | work_done += typhoon_rx(tp, &tp->rxLoRing, &indexes->rxLoReady, | 1785 | work_done += typhoon_rx(tp, &tp->rxLoRing, &indexes->rxLoReady, |
1791 | &indexes->rxLoCleared, budget); | 1786 | &indexes->rxLoCleared, budget - work_done); |
1792 | } | ||
1793 | |||
1794 | if(work_done) { | ||
1795 | *total_budget -= work_done; | ||
1796 | dev->quota -= work_done; | ||
1797 | |||
1798 | if(work_done >= orig_budget) | ||
1799 | done = 0; | ||
1800 | } | 1787 | } |
1801 | 1788 | ||
1802 | if(le32_to_cpu(indexes->rxBuffCleared) == tp->rxBuffRing.lastWrite) { | 1789 | if(le32_to_cpu(indexes->rxBuffCleared) == tp->rxBuffRing.lastWrite) { |
@@ -1804,14 +1791,14 @@ typhoon_poll(struct net_device *dev, int *total_budget) | |||
1804 | typhoon_fill_free_ring(tp); | 1791 | typhoon_fill_free_ring(tp); |
1805 | } | 1792 | } |
1806 | 1793 | ||
1807 | if(done) { | 1794 | if (work_done < budget) { |
1808 | netif_rx_complete(dev); | 1795 | netif_rx_complete(dev, napi); |
1809 | iowrite32(TYPHOON_INTR_NONE, | 1796 | iowrite32(TYPHOON_INTR_NONE, |
1810 | tp->ioaddr + TYPHOON_REG_INTR_MASK); | 1797 | tp->ioaddr + TYPHOON_REG_INTR_MASK); |
1811 | typhoon_post_pci_writes(tp->ioaddr); | 1798 | typhoon_post_pci_writes(tp->ioaddr); |
1812 | } | 1799 | } |
1813 | 1800 | ||
1814 | return (done ? 0 : 1); | 1801 | return work_done; |
1815 | } | 1802 | } |
1816 | 1803 | ||
1817 | static irqreturn_t | 1804 | static irqreturn_t |
@@ -1828,10 +1815,10 @@ typhoon_interrupt(int irq, void *dev_instance) | |||
1828 | 1815 | ||
1829 | iowrite32(intr_status, ioaddr + TYPHOON_REG_INTR_STATUS); | 1816 | iowrite32(intr_status, ioaddr + TYPHOON_REG_INTR_STATUS); |
1830 | 1817 | ||
1831 | if(netif_rx_schedule_prep(dev)) { | 1818 | if (netif_rx_schedule_prep(dev, &tp->napi)) { |
1832 | iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_MASK); | 1819 | iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_MASK); |
1833 | typhoon_post_pci_writes(ioaddr); | 1820 | typhoon_post_pci_writes(ioaddr); |
1834 | __netif_rx_schedule(dev); | 1821 | __netif_rx_schedule(dev, &tp->napi); |
1835 | } else { | 1822 | } else { |
1836 | printk(KERN_ERR "%s: Error, poll already scheduled\n", | 1823 | printk(KERN_ERR "%s: Error, poll already scheduled\n", |
1837 | dev->name); | 1824 | dev->name); |
@@ -2119,9 +2106,13 @@ typhoon_open(struct net_device *dev) | |||
2119 | if(err < 0) | 2106 | if(err < 0) |
2120 | goto out_sleep; | 2107 | goto out_sleep; |
2121 | 2108 | ||
2109 | napi_enable(&tp->napi); | ||
2110 | |||
2122 | err = typhoon_start_runtime(tp); | 2111 | err = typhoon_start_runtime(tp); |
2123 | if(err < 0) | 2112 | if(err < 0) { |
2113 | napi_disable(&tp->napi); | ||
2124 | goto out_irq; | 2114 | goto out_irq; |
2115 | } | ||
2125 | 2116 | ||
2126 | netif_start_queue(dev); | 2117 | netif_start_queue(dev); |
2127 | return 0; | 2118 | return 0; |
@@ -2150,6 +2141,7 @@ typhoon_close(struct net_device *dev) | |||
2150 | struct typhoon *tp = netdev_priv(dev); | 2141 | struct typhoon *tp = netdev_priv(dev); |
2151 | 2142 | ||
2152 | netif_stop_queue(dev); | 2143 | netif_stop_queue(dev); |
2144 | napi_disable(&tp->napi); | ||
2153 | 2145 | ||
2154 | if(typhoon_stop_runtime(tp, WaitSleep) < 0) | 2146 | if(typhoon_stop_runtime(tp, WaitSleep) < 0) |
2155 | printk(KERN_ERR "%s: unable to stop runtime\n", dev->name); | 2147 | printk(KERN_ERR "%s: unable to stop runtime\n", dev->name); |
@@ -2521,8 +2513,7 @@ typhoon_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
2521 | dev->stop = typhoon_close; | 2513 | dev->stop = typhoon_close; |
2522 | dev->set_multicast_list = typhoon_set_rx_mode; | 2514 | dev->set_multicast_list = typhoon_set_rx_mode; |
2523 | dev->tx_timeout = typhoon_tx_timeout; | 2515 | dev->tx_timeout = typhoon_tx_timeout; |
2524 | dev->poll = typhoon_poll; | 2516 | netif_napi_add(dev, &tp->napi, typhoon_poll, 16); |
2525 | dev->weight = 16; | ||
2526 | dev->watchdog_timeo = TX_TIMEOUT; | 2517 | dev->watchdog_timeo = TX_TIMEOUT; |
2527 | dev->get_stats = typhoon_get_stats; | 2518 | dev->get_stats = typhoon_get_stats; |
2528 | dev->set_mac_address = typhoon_set_mac_address; | 2519 | dev->set_mac_address = typhoon_set_mac_address; |
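
typhoon also shows the enable/disable discipline every converted driver
needs in its open and close paths: netif_napi_add() leaves the instance in
the scheduled (i.e. disabled) state, so open must call napi_enable()
before the hardware can raise RX interrupts, and any failure path after
that point must undo it. A sketch with hypothetical "foo" names:

	static int foo_open(struct net_device *dev)
	{
		struct foo_private *fp = netdev_priv(dev);
		int err;

		napi_enable(&fp->napi);
		err = foo_start_hw(fp);
		if (err) {
			napi_disable(&fp->napi);
			return err;
		}
		netif_start_queue(dev);
		return 0;
	}

	static int foo_close(struct net_device *dev)
	{
		struct foo_private *fp = netdev_priv(dev);

		netif_stop_queue(dev);
		napi_disable(&fp->napi);	/* waits out a running poll */
		foo_stop_hw(fp);
		return 0;
	}
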
diff --git a/drivers/net/ucc_geth.c b/drivers/net/ucc_geth.c index 9a38dfe45f8f..72f617bf2520 100644 --- a/drivers/net/ucc_geth.c +++ b/drivers/net/ucc_geth.c | |||
@@ -3582,41 +3582,31 @@ static int ucc_geth_tx(struct net_device *dev, u8 txQ) | |||
3582 | } | 3582 | } |
3583 | 3583 | ||
3584 | #ifdef CONFIG_UGETH_NAPI | 3584 | #ifdef CONFIG_UGETH_NAPI |
3585 | static int ucc_geth_poll(struct net_device *dev, int *budget) | 3585 | static int ucc_geth_poll(struct napi_struct *napi, int budget) |
3586 | { | 3586 | { |
3587 | struct ucc_geth_private *ugeth = netdev_priv(dev); | 3587 | struct ucc_geth_private *ugeth = container_of(napi, struct ucc_geth_private, napi); |
3588 | struct net_device *dev = ugeth->dev; | ||
3588 | struct ucc_geth_info *ug_info; | 3589 | struct ucc_geth_info *ug_info; |
3589 | struct ucc_fast_private *uccf; | 3590 | int howmany, i; |
3590 | int howmany; | ||
3591 | u8 i; | ||
3592 | int rx_work_limit; | ||
3593 | register u32 uccm; | ||
3594 | 3591 | ||
3595 | ug_info = ugeth->ug_info; | 3592 | ug_info = ugeth->ug_info; |
3596 | 3593 | ||
3597 | rx_work_limit = *budget; | ||
3598 | if (rx_work_limit > dev->quota) | ||
3599 | rx_work_limit = dev->quota; | ||
3600 | |||
3601 | howmany = 0; | 3594 | howmany = 0; |
3595 | for (i = 0; i < ug_info->numQueuesRx; i++) | ||
3596 | howmany += ucc_geth_rx(ugeth, i, budget - howmany); | ||
3602 | 3597 | ||
3603 | for (i = 0; i < ug_info->numQueuesRx; i++) { | 3598 | if (howmany < budget) { |
3604 | howmany += ucc_geth_rx(ugeth, i, rx_work_limit); | 3599 | struct ucc_fast_private *uccf; |
3605 | } | 3600 | u32 uccm; |
3606 | |||
3607 | dev->quota -= howmany; | ||
3608 | rx_work_limit -= howmany; | ||
3609 | *budget -= howmany; | ||
3610 | 3601 | ||
3611 | if (rx_work_limit > 0) { | 3602 | netif_rx_complete(dev, napi); |
3612 | netif_rx_complete(dev); | ||
3613 | uccf = ugeth->uccf; | 3603 | uccf = ugeth->uccf; |
3614 | uccm = in_be32(uccf->p_uccm); | 3604 | uccm = in_be32(uccf->p_uccm); |
3615 | uccm |= UCCE_RX_EVENTS; | 3605 | uccm |= UCCE_RX_EVENTS; |
3616 | out_be32(uccf->p_uccm, uccm); | 3606 | out_be32(uccf->p_uccm, uccm); |
3617 | } | 3607 | } |
3618 | 3608 | ||
3619 | return (rx_work_limit > 0) ? 0 : 1; | 3609 | return howmany; |
3620 | } | 3610 | } |
3621 | #endif /* CONFIG_UGETH_NAPI */ | 3611 | #endif /* CONFIG_UGETH_NAPI */ |
3622 | 3612 | ||
@@ -3651,10 +3641,10 @@ static irqreturn_t ucc_geth_irq_handler(int irq, void *info) | |||
3651 | /* check for receive events that require processing */ | 3641 | /* check for receive events that require processing */ |
3652 | if (ucce & UCCE_RX_EVENTS) { | 3642 | if (ucce & UCCE_RX_EVENTS) { |
3653 | #ifdef CONFIG_UGETH_NAPI | 3643 | #ifdef CONFIG_UGETH_NAPI |
3654 | if (netif_rx_schedule_prep(dev)) { | 3644 | if (netif_rx_schedule_prep(dev, &ugeth->napi)) { |
3655 | uccm &= ~UCCE_RX_EVENTS; | 3645 | uccm &= ~UCCE_RX_EVENTS; |
3656 | out_be32(uccf->p_uccm, uccm); | 3646 | out_be32(uccf->p_uccm, uccm); |
3657 | __netif_rx_schedule(dev); | 3647 | __netif_rx_schedule(dev, &ugeth->napi); |
3658 | } | 3648 | } |
3659 | #else | 3649 | #else |
3660 | rx_mask = UCCE_RXBF_SINGLE_MASK; | 3650 | rx_mask = UCCE_RXBF_SINGLE_MASK; |
@@ -3717,12 +3707,15 @@ static int ucc_geth_open(struct net_device *dev) | |||
3717 | return err; | 3707 | return err; |
3718 | } | 3708 | } |
3719 | 3709 | ||
3710 | #ifdef CONFIG_UGETH_NAPI | ||
3711 | napi_enable(&ugeth->napi); | ||
3712 | #endif | ||
3720 | err = ucc_geth_startup(ugeth); | 3713 | err = ucc_geth_startup(ugeth); |
3721 | if (err) { | 3714 | if (err) { |
3722 | if (netif_msg_ifup(ugeth)) | 3715 | if (netif_msg_ifup(ugeth)) |
3723 | ugeth_err("%s: Cannot configure net device, aborting.", | 3716 | ugeth_err("%s: Cannot configure net device, aborting.", |
3724 | dev->name); | 3717 | dev->name); |
3725 | return err; | 3718 | goto out_err; |
3726 | } | 3719 | } |
3727 | 3720 | ||
3728 | err = adjust_enet_interface(ugeth); | 3721 | err = adjust_enet_interface(ugeth); |
@@ -3730,7 +3723,7 @@ static int ucc_geth_open(struct net_device *dev) | |||
3730 | if (netif_msg_ifup(ugeth)) | 3723 | if (netif_msg_ifup(ugeth)) |
3731 | ugeth_err("%s: Cannot configure net device, aborting.", | 3724 | ugeth_err("%s: Cannot configure net device, aborting.", |
3732 | dev->name); | 3725 | dev->name); |
3733 | return err; | 3726 | goto out_err; |
3734 | } | 3727 | } |
3735 | 3728 | ||
3736 | /* Set MACSTNADDR1, MACSTNADDR2 */ | 3729 | /* Set MACSTNADDR1, MACSTNADDR2 */ |
@@ -3748,7 +3741,7 @@ static int ucc_geth_open(struct net_device *dev) | |||
3748 | if (err) { | 3741 | if (err) { |
3749 | if (netif_msg_ifup(ugeth)) | 3742 | if (netif_msg_ifup(ugeth)) |
3750 | ugeth_err("%s: Cannot initialize PHY, aborting.", dev->name); | 3743 | ugeth_err("%s: Cannot initialize PHY, aborting.", dev->name); |
3751 | return err; | 3744 | goto out_err; |
3752 | } | 3745 | } |
3753 | 3746 | ||
3754 | phy_start(ugeth->phydev); | 3747 | phy_start(ugeth->phydev); |
@@ -3761,7 +3754,7 @@ static int ucc_geth_open(struct net_device *dev) | |||
3761 | ugeth_err("%s: Cannot get IRQ for net device, aborting.", | 3754 | ugeth_err("%s: Cannot get IRQ for net device, aborting.", |
3762 | dev->name); | 3755 | dev->name); |
3763 | ucc_geth_stop(ugeth); | 3756 | ucc_geth_stop(ugeth); |
3764 | return err; | 3757 | goto out_err; |
3765 | } | 3758 | } |
3766 | 3759 | ||
3767 | err = ugeth_enable(ugeth, COMM_DIR_RX_AND_TX); | 3760 | err = ugeth_enable(ugeth, COMM_DIR_RX_AND_TX); |
@@ -3769,12 +3762,18 @@ static int ucc_geth_open(struct net_device *dev) | |||
3769 | if (netif_msg_ifup(ugeth)) | 3762 | if (netif_msg_ifup(ugeth)) |
3770 | ugeth_err("%s: Cannot enable net device, aborting.", dev->name); | 3763 | ugeth_err("%s: Cannot enable net device, aborting.", dev->name); |
3771 | ucc_geth_stop(ugeth); | 3764 | ucc_geth_stop(ugeth); |
3772 | return err; | 3765 | goto out_err; |
3773 | } | 3766 | } |
3774 | 3767 | ||
3775 | netif_start_queue(dev); | 3768 | netif_start_queue(dev); |
3776 | 3769 | ||
3777 | return err; | 3770 | return err; |
3771 | |||
3772 | out_err: | ||
3773 | #ifdef CONFIG_UGETH_NAPI | ||
3774 | napi_disable(&ugeth->napi); | ||
3775 | #endif | ||
3776 | return err; | ||
3778 | } | 3777 | } |
3779 | 3778 | ||
3780 | /* Stops the kernel queue, and halts the controller */ | 3779 | /* Stops the kernel queue, and halts the controller */ |
@@ -3784,6 +3783,10 @@ static int ucc_geth_close(struct net_device *dev) | |||
3784 | 3783 | ||
3785 | ugeth_vdbg("%s: IN", __FUNCTION__); | 3784 | ugeth_vdbg("%s: IN", __FUNCTION__); |
3786 | 3785 | ||
3786 | #ifdef CONFIG_UGETH_NAPI | ||
3787 | napi_disable(&ugeth->napi); | ||
3788 | #endif | ||
3789 | |||
3787 | ucc_geth_stop(ugeth); | 3790 | ucc_geth_stop(ugeth); |
3788 | 3791 | ||
3789 | phy_disconnect(ugeth->phydev); | 3792 | phy_disconnect(ugeth->phydev); |
@@ -3964,8 +3967,7 @@ static int ucc_geth_probe(struct of_device* ofdev, const struct of_device_id *ma | |||
3964 | dev->tx_timeout = ucc_geth_timeout; | 3967 | dev->tx_timeout = ucc_geth_timeout; |
3965 | dev->watchdog_timeo = TX_TIMEOUT; | 3968 | dev->watchdog_timeo = TX_TIMEOUT; |
3966 | #ifdef CONFIG_UGETH_NAPI | 3969 | #ifdef CONFIG_UGETH_NAPI |
3967 | dev->poll = ucc_geth_poll; | 3970 | netif_napi_add(dev, &ugeth->napi, ucc_geth_poll, UCC_GETH_DEV_WEIGHT); |
3968 | dev->weight = UCC_GETH_DEV_WEIGHT; | ||
3969 | #endif /* CONFIG_UGETH_NAPI */ | 3971 | #endif /* CONFIG_UGETH_NAPI */ |
3970 | dev->stop = ucc_geth_close; | 3972 | dev->stop = ucc_geth_close; |
3971 | dev->get_stats = ucc_geth_get_stats; | 3973 | dev->get_stats = ucc_geth_get_stats; |
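
ucc_geth shows how a driver with several RX queues shares a single NAPI
budget: each queue is offered only what its predecessors left over.
Reduced to its core (everything except ucc_geth_rx() is paraphrased):

	int howmany = 0, i;

	for (i = 0; i < num_rx_queues; i++)
		howmany += ucc_geth_rx(ugeth, i, budget - howmany);

	if (howmany < budget) {
		netif_rx_complete(dev, napi);
		/* ... re-enable RX events in the UCC event mask ... */
	}
	return howmany;

Because each per-queue call can consume at most its remaining allowance,
howmany never exceeds budget, which keeps the completion test valid.
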
diff --git a/drivers/net/ucc_geth.h b/drivers/net/ucc_geth.h index bb4dac8c0c65..0579ba081aa5 100644 --- a/drivers/net/ucc_geth.h +++ b/drivers/net/ucc_geth.h | |||
@@ -1184,6 +1184,7 @@ struct ucc_geth_private { | |||
1184 | struct ucc_geth_info *ug_info; | 1184 | struct ucc_geth_info *ug_info; |
1185 | struct ucc_fast_private *uccf; | 1185 | struct ucc_fast_private *uccf; |
1186 | struct net_device *dev; | 1186 | struct net_device *dev; |
1187 | struct napi_struct napi; | ||
1187 | struct net_device_stats stats; /* linux network statistics */ | 1188 | struct net_device_stats stats; /* linux network statistics */ |
1188 | struct ucc_geth *ug_regs; | 1189 | struct ucc_geth *ug_regs; |
1189 | struct ucc_geth_init_pram *p_init_enet_param_shadow; | 1190 | struct ucc_geth_init_pram *p_init_enet_param_shadow; |
diff --git a/drivers/net/via-rhine.c b/drivers/net/via-rhine.c index b56dff26772d..7a5899059c44 100644 --- a/drivers/net/via-rhine.c +++ b/drivers/net/via-rhine.c | |||
@@ -389,6 +389,8 @@ struct rhine_private { | |||
389 | 389 | ||
390 | struct pci_dev *pdev; | 390 | struct pci_dev *pdev; |
391 | long pioaddr; | 391 | long pioaddr; |
392 | struct net_device *dev; | ||
393 | struct napi_struct napi; | ||
392 | struct net_device_stats stats; | 394 | struct net_device_stats stats; |
393 | spinlock_t lock; | 395 | spinlock_t lock; |
394 | 396 | ||
@@ -582,28 +584,25 @@ static void rhine_poll(struct net_device *dev) | |||
582 | #endif | 584 | #endif |
583 | 585 | ||
584 | #ifdef CONFIG_VIA_RHINE_NAPI | 586 | #ifdef CONFIG_VIA_RHINE_NAPI |
585 | static int rhine_napipoll(struct net_device *dev, int *budget) | 587 | static int rhine_napipoll(struct napi_struct *napi, int budget) |
586 | { | 588 | { |
587 | struct rhine_private *rp = netdev_priv(dev); | 589 | struct rhine_private *rp = container_of(napi, struct rhine_private, napi); |
590 | struct net_device *dev = rp->dev; | ||
588 | void __iomem *ioaddr = rp->base; | 591 | void __iomem *ioaddr = rp->base; |
589 | int done, limit = min(dev->quota, *budget); | 592 | int work_done; |
590 | 593 | ||
591 | done = rhine_rx(dev, limit); | 594 | work_done = rhine_rx(dev, budget); |
592 | *budget -= done; | ||
593 | dev->quota -= done; | ||
594 | 595 | ||
595 | if (done < limit) { | 596 | if (work_done < budget) { |
596 | netif_rx_complete(dev); | 597 | netif_rx_complete(dev, napi); |
597 | 598 | ||
598 | iowrite16(IntrRxDone | IntrRxErr | IntrRxEmpty| IntrRxOverflow | | 599 | iowrite16(IntrRxDone | IntrRxErr | IntrRxEmpty| IntrRxOverflow | |
599 | IntrRxDropped | IntrRxNoBuf | IntrTxAborted | | 600 | IntrRxDropped | IntrRxNoBuf | IntrTxAborted | |
600 | IntrTxDone | IntrTxError | IntrTxUnderrun | | 601 | IntrTxDone | IntrTxError | IntrTxUnderrun | |
601 | IntrPCIErr | IntrStatsMax | IntrLinkChange, | 602 | IntrPCIErr | IntrStatsMax | IntrLinkChange, |
602 | ioaddr + IntrEnable); | 603 | ioaddr + IntrEnable); |
603 | return 0; | ||
604 | } | 604 | } |
605 | else | 605 | return work_done; |
606 | return 1; | ||
607 | } | 606 | } |
608 | #endif | 607 | #endif |
609 | 608 | ||
@@ -707,6 +706,7 @@ static int __devinit rhine_init_one(struct pci_dev *pdev, | |||
707 | SET_NETDEV_DEV(dev, &pdev->dev); | 706 | SET_NETDEV_DEV(dev, &pdev->dev); |
708 | 707 | ||
709 | rp = netdev_priv(dev); | 708 | rp = netdev_priv(dev); |
709 | rp->dev = dev; | ||
710 | rp->quirks = quirks; | 710 | rp->quirks = quirks; |
711 | rp->pioaddr = pioaddr; | 711 | rp->pioaddr = pioaddr; |
712 | rp->pdev = pdev; | 712 | rp->pdev = pdev; |
@@ -785,8 +785,7 @@ static int __devinit rhine_init_one(struct pci_dev *pdev, | |||
785 | dev->poll_controller = rhine_poll; | 785 | dev->poll_controller = rhine_poll; |
786 | #endif | 786 | #endif |
787 | #ifdef CONFIG_VIA_RHINE_NAPI | 787 | #ifdef CONFIG_VIA_RHINE_NAPI |
788 | dev->poll = rhine_napipoll; | 788 | netif_napi_add(dev, &rp->napi, rhine_napipoll, 64); |
789 | dev->weight = 64; | ||
790 | #endif | 789 | #endif |
791 | if (rp->quirks & rqRhineI) | 790 | if (rp->quirks & rqRhineI) |
792 | dev->features |= NETIF_F_SG|NETIF_F_HW_CSUM; | 791 | dev->features |= NETIF_F_SG|NETIF_F_HW_CSUM; |
@@ -1061,7 +1060,9 @@ static void init_registers(struct net_device *dev) | |||
1061 | 1060 | ||
1062 | rhine_set_rx_mode(dev); | 1061 | rhine_set_rx_mode(dev); |
1063 | 1062 | ||
1064 | netif_poll_enable(dev); | 1063 | #ifdef CONFIG_VIA_RHINE_NAPI |
1064 | napi_enable(&rp->napi); | ||
1065 | #endif | ||
1065 | 1066 | ||
1066 | /* Enable interrupts by setting the interrupt mask. */ | 1067 | /* Enable interrupts by setting the interrupt mask. */ |
1067 | iowrite16(IntrRxDone | IntrRxErr | IntrRxEmpty| IntrRxOverflow | | 1068 | iowrite16(IntrRxDone | IntrRxErr | IntrRxEmpty| IntrRxOverflow | |
@@ -1196,6 +1197,10 @@ static void rhine_tx_timeout(struct net_device *dev) | |||
1196 | /* protect against concurrent rx interrupts */ | 1197 | /* protect against concurrent rx interrupts */ |
1197 | disable_irq(rp->pdev->irq); | 1198 | disable_irq(rp->pdev->irq); |
1198 | 1199 | ||
1200 | #ifdef CONFIG_VIA_RHINE_NAPI | ||
1201 | napi_disable(&rp->napi); | ||
1202 | #endif | ||
1203 | |||
1199 | spin_lock(&rp->lock); | 1204 | spin_lock(&rp->lock); |
1200 | 1205 | ||
1201 | /* clear all descriptors */ | 1206 | /* clear all descriptors */ |
@@ -1324,7 +1329,7 @@ static irqreturn_t rhine_interrupt(int irq, void *dev_instance) | |||
1324 | IntrPCIErr | IntrStatsMax | IntrLinkChange, | 1329 | IntrPCIErr | IntrStatsMax | IntrLinkChange, |
1325 | ioaddr + IntrEnable); | 1330 | ioaddr + IntrEnable); |
1326 | 1331 | ||
1327 | netif_rx_schedule(dev); | 1332 | netif_rx_schedule(dev, &rp->napi); |
1328 | #else | 1333 | #else |
1329 | rhine_rx(dev, RX_RING_SIZE); | 1334 | rhine_rx(dev, RX_RING_SIZE); |
1330 | #endif | 1335 | #endif |
@@ -1837,7 +1842,9 @@ static int rhine_close(struct net_device *dev) | |||
1837 | spin_lock_irq(&rp->lock); | 1842 | spin_lock_irq(&rp->lock); |
1838 | 1843 | ||
1839 | netif_stop_queue(dev); | 1844 | netif_stop_queue(dev); |
1840 | netif_poll_disable(dev); | 1845 | #ifdef CONFIG_VIA_RHINE_NAPI |
1846 | napi_disable(&rp->napi); | ||
1847 | #endif | ||
1841 | 1848 | ||
1842 | if (debug > 1) | 1849 | if (debug > 1) |
1843 | printk(KERN_DEBUG "%s: Shutting down ethercard, " | 1850 | printk(KERN_DEBUG "%s: Shutting down ethercard, " |
@@ -1936,6 +1943,9 @@ static int rhine_suspend(struct pci_dev *pdev, pm_message_t state) | |||
1936 | if (!netif_running(dev)) | 1943 | if (!netif_running(dev)) |
1937 | return 0; | 1944 | return 0; |
1938 | 1945 | ||
1946 | #ifdef CONFIG_VIA_RHINE_NAPI | ||
1947 | napi_disable(&rp->napi); | ||
1948 | #endif | ||
1939 | netif_device_detach(dev); | 1949 | netif_device_detach(dev); |
1940 | pci_save_state(pdev); | 1950 | pci_save_state(pdev); |
1941 | 1951 | ||
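
via-rhine, like ucc_geth, keeps NAPI optional behind a Kconfig symbol, so
each napi_enable()/napi_disable() call site gains an #ifdef. The shape
this takes (CONFIG_FOO_NAPI stands in for the driver's real option):

	static int foo_open(struct net_device *dev)
	{
		struct foo_private *fp = netdev_priv(dev);

	#ifdef CONFIG_FOO_NAPI
		napi_enable(&fp->napi);
	#endif
		/* ... bring up the hardware ... */
		return 0;
	}

Note the extra napi_disable() calls in rhine_tx_timeout() and
rhine_suspend(): any path that quiesces the device while it might still
be on the poll list has to take the napi instance down first.
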
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c index 4445810335a8..70e551c19e3a 100644 --- a/drivers/net/xen-netfront.c +++ b/drivers/net/xen-netfront.c | |||
@@ -72,6 +72,7 @@ struct netfront_info { | |||
72 | struct list_head list; | 72 | struct list_head list; |
73 | struct net_device *netdev; | 73 | struct net_device *netdev; |
74 | 74 | ||
75 | struct napi_struct napi; | ||
75 | struct net_device_stats stats; | 76 | struct net_device_stats stats; |
76 | 77 | ||
77 | struct xen_netif_tx_front_ring tx; | 78 | struct xen_netif_tx_front_ring tx; |
@@ -185,7 +186,8 @@ static int xennet_can_sg(struct net_device *dev) | |||
185 | static void rx_refill_timeout(unsigned long data) | 186 | static void rx_refill_timeout(unsigned long data) |
186 | { | 187 | { |
187 | struct net_device *dev = (struct net_device *)data; | 188 | struct net_device *dev = (struct net_device *)data; |
188 | netif_rx_schedule(dev); | 189 | struct netfront_info *np = netdev_priv(dev); |
190 | netif_rx_schedule(dev, &np->napi); | ||
189 | } | 191 | } |
190 | 192 | ||
191 | static int netfront_tx_slot_available(struct netfront_info *np) | 193 | static int netfront_tx_slot_available(struct netfront_info *np) |
@@ -342,12 +344,14 @@ static int xennet_open(struct net_device *dev) | |||
342 | 344 | ||
343 | memset(&np->stats, 0, sizeof(np->stats)); | 345 | memset(&np->stats, 0, sizeof(np->stats)); |
344 | 346 | ||
347 | napi_enable(&np->napi); | ||
348 | |||
345 | spin_lock_bh(&np->rx_lock); | 349 | spin_lock_bh(&np->rx_lock); |
346 | if (netif_carrier_ok(dev)) { | 350 | if (netif_carrier_ok(dev)) { |
347 | xennet_alloc_rx_buffers(dev); | 351 | xennet_alloc_rx_buffers(dev); |
348 | np->rx.sring->rsp_event = np->rx.rsp_cons + 1; | 352 | np->rx.sring->rsp_event = np->rx.rsp_cons + 1; |
349 | if (RING_HAS_UNCONSUMED_RESPONSES(&np->rx)) | 353 | if (RING_HAS_UNCONSUMED_RESPONSES(&np->rx)) |
350 | netif_rx_schedule(dev); | 354 | netif_rx_schedule(dev, &np->napi); |
351 | } | 355 | } |
352 | spin_unlock_bh(&np->rx_lock); | 356 | spin_unlock_bh(&np->rx_lock); |
353 | 357 | ||
@@ -589,6 +593,7 @@ static int xennet_close(struct net_device *dev) | |||
589 | { | 593 | { |
590 | struct netfront_info *np = netdev_priv(dev); | 594 | struct netfront_info *np = netdev_priv(dev); |
591 | netif_stop_queue(np->netdev); | 595 | netif_stop_queue(np->netdev); |
596 | napi_disable(&np->napi); | ||
592 | return 0; | 597 | return 0; |
593 | } | 598 | } |
594 | 599 | ||
@@ -872,15 +877,16 @@ static int handle_incoming_queue(struct net_device *dev, | |||
872 | return packets_dropped; | 877 | return packets_dropped; |
873 | } | 878 | } |
874 | 879 | ||
875 | static int xennet_poll(struct net_device *dev, int *pbudget) | 880 | static int xennet_poll(struct napi_struct *napi, int budget) |
876 | { | 881 | { |
877 | struct netfront_info *np = netdev_priv(dev); | 882 | struct netfront_info *np = container_of(napi, struct netfront_info, napi); |
883 | struct net_device *dev = np->netdev; | ||
878 | struct sk_buff *skb; | 884 | struct sk_buff *skb; |
879 | struct netfront_rx_info rinfo; | 885 | struct netfront_rx_info rinfo; |
880 | struct xen_netif_rx_response *rx = &rinfo.rx; | 886 | struct xen_netif_rx_response *rx = &rinfo.rx; |
881 | struct xen_netif_extra_info *extras = rinfo.extras; | 887 | struct xen_netif_extra_info *extras = rinfo.extras; |
882 | RING_IDX i, rp; | 888 | RING_IDX i, rp; |
883 | int work_done, budget, more_to_do = 1; | 889 | int work_done; |
884 | struct sk_buff_head rxq; | 890 | struct sk_buff_head rxq; |
885 | struct sk_buff_head errq; | 891 | struct sk_buff_head errq; |
886 | struct sk_buff_head tmpq; | 892 | struct sk_buff_head tmpq; |
@@ -899,9 +905,6 @@ static int xennet_poll(struct net_device *dev, int *pbudget) | |||
899 | skb_queue_head_init(&errq); | 905 | skb_queue_head_init(&errq); |
900 | skb_queue_head_init(&tmpq); | 906 | skb_queue_head_init(&tmpq); |
901 | 907 | ||
902 | budget = *pbudget; | ||
903 | if (budget > dev->quota) | ||
904 | budget = dev->quota; | ||
905 | rp = np->rx.sring->rsp_prod; | 908 | rp = np->rx.sring->rsp_prod; |
906 | rmb(); /* Ensure we see queued responses up to 'rp'. */ | 909 | rmb(); /* Ensure we see queued responses up to 'rp'. */ |
907 | 910 | ||
@@ -1006,22 +1009,21 @@ err: | |||
1006 | 1009 | ||
1007 | xennet_alloc_rx_buffers(dev); | 1010 | xennet_alloc_rx_buffers(dev); |
1008 | 1011 | ||
1009 | *pbudget -= work_done; | ||
1010 | dev->quota -= work_done; | ||
1011 | |||
1012 | if (work_done < budget) { | 1012 | if (work_done < budget) { |
1013 | int more_to_do = 0; | ||
1014 | |||
1013 | local_irq_save(flags); | 1015 | local_irq_save(flags); |
1014 | 1016 | ||
1015 | RING_FINAL_CHECK_FOR_RESPONSES(&np->rx, more_to_do); | 1017 | RING_FINAL_CHECK_FOR_RESPONSES(&np->rx, more_to_do); |
1016 | if (!more_to_do) | 1018 | if (!more_to_do) |
1017 | __netif_rx_complete(dev); | 1019 | __netif_rx_complete(dev, napi); |
1018 | 1020 | ||
1019 | local_irq_restore(flags); | 1021 | local_irq_restore(flags); |
1020 | } | 1022 | } |
1021 | 1023 | ||
1022 | spin_unlock(&np->rx_lock); | 1024 | spin_unlock(&np->rx_lock); |
1023 | 1025 | ||
1024 | return more_to_do; | 1026 | return work_done; |
1025 | } | 1027 | } |
1026 | 1028 | ||
1027 | static int xennet_change_mtu(struct net_device *dev, int mtu) | 1029 | static int xennet_change_mtu(struct net_device *dev, int mtu) |
@@ -1201,10 +1203,9 @@ static struct net_device * __devinit xennet_create_dev(struct xenbus_device *dev | |||
1201 | netdev->hard_start_xmit = xennet_start_xmit; | 1203 | netdev->hard_start_xmit = xennet_start_xmit; |
1202 | netdev->stop = xennet_close; | 1204 | netdev->stop = xennet_close; |
1203 | netdev->get_stats = xennet_get_stats; | 1205 | netdev->get_stats = xennet_get_stats; |
1204 | netdev->poll = xennet_poll; | 1206 | netif_napi_add(netdev, &np->napi, xennet_poll, 64); |
1205 | netdev->uninit = xennet_uninit; | 1207 | netdev->uninit = xennet_uninit; |
1206 | netdev->change_mtu = xennet_change_mtu; | 1208 | netdev->change_mtu = xennet_change_mtu; |
1207 | netdev->weight = 64; | ||
1208 | netdev->features = NETIF_F_IP_CSUM; | 1209 | netdev->features = NETIF_F_IP_CSUM; |
1209 | 1210 | ||
1210 | SET_ETHTOOL_OPS(netdev, &xennet_ethtool_ops); | 1211 | SET_ETHTOOL_OPS(netdev, &xennet_ethtool_ops); |
@@ -1349,7 +1350,7 @@ static irqreturn_t xennet_interrupt(int irq, void *dev_id) | |||
1349 | xennet_tx_buf_gc(dev); | 1350 | xennet_tx_buf_gc(dev); |
1350 | /* Under tx_lock: protects access to rx shared-ring indexes. */ | 1351 | /* Under tx_lock: protects access to rx shared-ring indexes. */ |
1351 | if (RING_HAS_UNCONSUMED_RESPONSES(&np->rx)) | 1352 | if (RING_HAS_UNCONSUMED_RESPONSES(&np->rx)) |
1352 | netif_rx_schedule(dev); | 1353 | netif_rx_schedule(dev, &np->napi); |
1353 | } | 1354 | } |
1354 | 1355 | ||
1355 | spin_unlock_irqrestore(&np->tx_lock, flags); | 1356 | spin_unlock_irqrestore(&np->tx_lock, flags); |
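
xen-netfront keeps its "check once more before leaving the poll list"
dance, now phrased against the napi instance: only if a final look at the
shared ring finds nothing pending does it call __netif_rx_complete(). The
general shape of that race avoidance (event-channel details elided):

	if (work_done < budget) {
		int more_to_do = 0;

		local_irq_save(flags);
		/* Catch responses that raced in after the last pass;
		 * RING_FINAL_CHECK_FOR_RESPONSES also re-arms event
		 * delivery from the backend. */
		RING_FINAL_CHECK_FOR_RESPONSES(&np->rx, more_to_do);
		if (!more_to_do)
			__netif_rx_complete(dev, napi);
		local_irq_restore(flags);
	}
	return work_done;

If more_to_do is set, the handler stays on the poll list and will be
called again, which is why returning work_done without completing is
legitimate here.
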
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index e679b2751665..b93575db8cce 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h | |||
@@ -31,6 +31,7 @@ | |||
31 | 31 | ||
32 | #ifdef __KERNEL__ | 32 | #ifdef __KERNEL__ |
33 | #include <linux/timer.h> | 33 | #include <linux/timer.h> |
34 | #include <linux/delay.h> | ||
34 | #include <asm/atomic.h> | 35 | #include <asm/atomic.h> |
35 | #include <asm/cache.h> | 36 | #include <asm/cache.h> |
36 | #include <asm/byteorder.h> | 37 | #include <asm/byteorder.h> |
@@ -38,6 +39,7 @@ | |||
38 | #include <linux/device.h> | 39 | #include <linux/device.h> |
39 | #include <linux/percpu.h> | 40 | #include <linux/percpu.h> |
40 | #include <linux/dmaengine.h> | 41 | #include <linux/dmaengine.h> |
42 | #include <linux/workqueue.h> | ||
41 | 43 | ||
42 | struct vlan_group; | 44 | struct vlan_group; |
43 | struct ethtool_ops; | 45 | struct ethtool_ops; |
@@ -258,7 +260,6 @@ enum netdev_state_t | |||
258 | __LINK_STATE_PRESENT, | 260 | __LINK_STATE_PRESENT, |
259 | __LINK_STATE_SCHED, | 261 | __LINK_STATE_SCHED, |
260 | __LINK_STATE_NOCARRIER, | 262 | __LINK_STATE_NOCARRIER, |
261 | __LINK_STATE_RX_SCHED, | ||
262 | __LINK_STATE_LINKWATCH_PENDING, | 263 | __LINK_STATE_LINKWATCH_PENDING, |
263 | __LINK_STATE_DORMANT, | 264 | __LINK_STATE_DORMANT, |
264 | __LINK_STATE_QDISC_RUNNING, | 265 | __LINK_STATE_QDISC_RUNNING, |
@@ -278,6 +279,110 @@ struct netdev_boot_setup { | |||
278 | extern int __init netdev_boot_setup(char *str); | 279 | extern int __init netdev_boot_setup(char *str); |
279 | 280 | ||
280 | /* | 281 | /* |
282 | * Structure for NAPI scheduling similar to tasklet but with weighting | ||
283 | */ | ||
284 | struct napi_struct { | ||
285 | /* The poll_list must only be managed by the entity which | ||
286 | * changes the state of the NAPI_STATE_SCHED bit. This means | ||
287 | * whoever atomically sets that bit can add this napi_struct | ||
288 | * to the per-cpu poll_list, and whoever clears that bit | ||
289 | * can remove from the list right before clearing the bit. | ||
290 | */ | ||
291 | struct list_head poll_list; | ||
292 | |||
293 | unsigned long state; | ||
294 | int weight; | ||
295 | int (*poll)(struct napi_struct *, int); | ||
296 | #ifdef CONFIG_NETPOLL | ||
297 | spinlock_t poll_lock; | ||
298 | int poll_owner; | ||
299 | struct net_device *dev; | ||
300 | struct list_head dev_list; | ||
301 | #endif | ||
302 | }; | ||
303 | |||
304 | enum | ||
305 | { | ||
306 | NAPI_STATE_SCHED, /* Poll is scheduled */ | ||
307 | }; | ||
308 | |||
309 | extern void FASTCALL(__napi_schedule(struct napi_struct *n)); | ||
310 | |||
311 | /** | ||
312 | * napi_schedule_prep - check if napi can be scheduled | ||
313 | * @n: napi context | ||
314 | * | ||
315 | * Test if NAPI routine is already running, and if not mark | ||
316 | * it as running. This is used as a condition variable | ||
317 | * insure only one NAPI poll instance runs | ||
318 | */ | ||
319 | static inline int napi_schedule_prep(struct napi_struct *n) | ||
320 | { | ||
321 | return !test_and_set_bit(NAPI_STATE_SCHED, &n->state); | ||
322 | } | ||
323 | |||
324 | /** | ||
325 | * napi_schedule - schedule NAPI poll | ||
326 | * @n: napi context | ||
327 | * | ||
328 | * Schedule NAPI poll routine to be called if it is not already | ||
329 | * running. | ||
330 | */ | ||
331 | static inline void napi_schedule(struct napi_struct *n) | ||
332 | { | ||
333 | if (napi_schedule_prep(n)) | ||
334 | __napi_schedule(n); | ||
335 | } | ||
336 | |||
337 | /** | ||
338 | * napi_complete - NAPI processing complete | ||
339 | * @n: napi context | ||
340 | * | ||
341 | * Mark NAPI processing as complete. | ||
342 | */ | ||
343 | static inline void __napi_complete(struct napi_struct *n) | ||
344 | { | ||
345 | BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state)); | ||
346 | list_del(&n->poll_list); | ||
347 | smp_mb__before_clear_bit(); | ||
348 | clear_bit(NAPI_STATE_SCHED, &n->state); | ||
349 | } | ||
350 | |||
351 | static inline void napi_complete(struct napi_struct *n) | ||
352 | { | ||
353 | local_irq_disable(); | ||
354 | __napi_complete(n); | ||
355 | local_irq_enable(); | ||
356 | } | ||
357 | |||
358 | /** | ||
359 | * napi_disable - prevent NAPI from scheduling | ||
360 | * @n: napi context | ||
361 | * | ||
362 | * Stop NAPI from being scheduled on this context. | ||
363 | * Waits until any outstanding processing completes. | ||
364 | */ | ||
365 | static inline void napi_disable(struct napi_struct *n) | ||
366 | { | ||
367 | while (test_and_set_bit(NAPI_STATE_SCHED, &n->state)) | ||
368 | msleep_interruptible(1); | ||
369 | } | ||
370 | |||
371 | /** | ||
372 | * napi_enable - enable NAPI scheduling | ||
373 | * @n: napi context | ||
374 | * | ||
375 | * Allow NAPI to be scheduled on this context again. | ||
376 | * Must be paired with napi_disable. | ||
377 | */ | ||
378 | static inline void napi_enable(struct napi_struct *n) | ||
379 | { | ||
380 | BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state)); | ||
381 | smp_mb__before_clear_bit(); | ||
382 | clear_bit(NAPI_STATE_SCHED, &n->state); | ||
383 | } | ||
384 | |||
385 | /* | ||
281 | * The DEVICE structure. | 386 | * The DEVICE structure. |
282 | * Actually, this whole structure is a big mistake. It mixes I/O | 387 | * Actually, this whole structure is a big mistake. It mixes I/O |
283 | * data with strictly "high-level" data, and it has to know about | 388 | * data with strictly "high-level" data, and it has to know about |
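
The new core API above is deliberately small: a single state bit,
NAPI_STATE_SCHED, serializes everything. napi_schedule_prep() claims the
bit, __napi_complete() releases it, and napi_disable() reuses it as a
latch, sleeping in a loop until it can set the bit itself, which both
waits out a running poll and blocks future scheduling. A condensed view
of the ownership rule described in the napi_struct comment:

	/* Only the owner of NAPI_STATE_SCHED may touch poll_list. */
	if (napi_schedule_prep(n))	/* won the bit: we own poll_list */
		__napi_schedule(n);	/* adds n to this cpu's poll_list */

	/* ... later, in the poll handler, when work_done < budget ... */
	napi_complete(n);		/* deletes n from the list, then
					 * clears NAPI_STATE_SCHED */
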
@@ -319,6 +424,9 @@ struct net_device | |||
319 | unsigned long state; | 424 | unsigned long state; |
320 | 425 | ||
321 | struct list_head dev_list; | 426 | struct list_head dev_list; |
427 | #ifdef CONFIG_NETPOLL | ||
428 | struct list_head napi_list; | ||
429 | #endif | ||
322 | 430 | ||
323 | /* The device initialization function. Called only once. */ | 431 | /* The device initialization function. Called only once. */ |
324 | int (*init)(struct net_device *dev); | 432 | int (*init)(struct net_device *dev); |
@@ -430,12 +538,6 @@ struct net_device | |||
430 | /* | 538 | /* |
431 | * Cache line mostly used on receive path (including eth_type_trans()) | 539 | * Cache line mostly used on receive path (including eth_type_trans()) |
432 | */ | 540 | */ |
433 | struct list_head poll_list ____cacheline_aligned_in_smp; | ||
434 | /* Link to poll list */ | ||
435 | |||
436 | int (*poll) (struct net_device *dev, int *quota); | ||
437 | int quota; | ||
438 | int weight; | ||
439 | unsigned long last_rx; /* Time of last Rx */ | 541 | unsigned long last_rx; /* Time of last Rx */ |
440 | /* Interface address info used in eth_type_trans() */ | 542 | /* Interface address info used in eth_type_trans() */ |
441 | unsigned char dev_addr[MAX_ADDR_LEN]; /* hw address, (before bcast | 543 | unsigned char dev_addr[MAX_ADDR_LEN]; /* hw address, (before bcast |
@@ -582,6 +684,12 @@ struct net_device | |||
582 | #define NETDEV_ALIGN 32 | 684 | #define NETDEV_ALIGN 32 |
583 | #define NETDEV_ALIGN_CONST (NETDEV_ALIGN - 1) | 685 | #define NETDEV_ALIGN_CONST (NETDEV_ALIGN - 1) |
584 | 686 | ||
687 | /** | ||
688 | * netdev_priv - access network device private data | ||
689 | * @dev: network device | ||
690 | * | ||
691 | * Get network device private data | ||
692 | */ | ||
585 | static inline void *netdev_priv(const struct net_device *dev) | 693 | static inline void *netdev_priv(const struct net_device *dev) |
586 | { | 694 | { |
587 | return dev->priv; | 695 | return dev->priv; |
@@ -593,6 +701,23 @@ static inline void *netdev_priv(const struct net_device *dev) | |||
593 | */ | 701 | */ |
594 | #define SET_NETDEV_DEV(net, pdev) ((net)->dev.parent = (pdev)) | 702 | #define SET_NETDEV_DEV(net, pdev) ((net)->dev.parent = (pdev)) |
595 | 703 | ||
704 | static inline void netif_napi_add(struct net_device *dev, | ||
705 | struct napi_struct *napi, | ||
706 | int (*poll)(struct napi_struct *, int), | ||
707 | int weight) | ||
708 | { | ||
709 | INIT_LIST_HEAD(&napi->poll_list); | ||
710 | napi->poll = poll; | ||
711 | napi->weight = weight; | ||
712 | #ifdef CONFIG_NETPOLL | ||
713 | napi->dev = dev; | ||
714 | list_add(&napi->dev_list, &dev->napi_list); | ||
715 | spin_lock_init(&napi->poll_lock); | ||
716 | napi->poll_owner = -1; | ||
717 | #endif | ||
718 | set_bit(NAPI_STATE_SCHED, &napi->state); | ||
719 | } | ||
720 | |||
596 | struct packet_type { | 721 | struct packet_type { |
597 | __be16 type; /* This is really htons(ether_type). */ | 722 | __be16 type; /* This is really htons(ether_type). */ |
598 | struct net_device *dev; /* NULL is wildcarded here */ | 723 | struct net_device *dev; /* NULL is wildcarded here */ |
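
netif_napi_add() replaces the dev->poll / dev->weight assignments deleted
throughout this patch. Probe-time usage, continuing the hypothetical
"foo" driver from the sketches above:

	struct foo_private *fp = netdev_priv(dev);

	fp->dev = dev;	/* poll handlers need a way back to the netdev */
	netif_napi_add(dev, &fp->napi, foo_poll, 64);

Note that netif_napi_add() leaves NAPI_STATE_SCHED set, so the instance
cannot actually be scheduled until the driver's open routine calls
napi_enable().
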
@@ -678,7 +803,6 @@ static inline int unregister_gifconf(unsigned int family) | |||
678 | * Incoming packets are placed on per-cpu queues so that | 803 | * Incoming packets are placed on per-cpu queues so that |
679 | * no locking is needed. | 804 | * no locking is needed. |
680 | */ | 805 | */ |
681 | |||
682 | struct softnet_data | 806 | struct softnet_data |
683 | { | 807 | { |
684 | struct net_device *output_queue; | 808 | struct net_device *output_queue; |
@@ -686,7 +810,7 @@ struct softnet_data | |||
686 | struct list_head poll_list; | 810 | struct list_head poll_list; |
687 | struct sk_buff *completion_queue; | 811 | struct sk_buff *completion_queue; |
688 | 812 | ||
689 | struct net_device backlog_dev; /* Sorry. 8) */ | 813 | struct napi_struct backlog; |
690 | #ifdef CONFIG_NET_DMA | 814 | #ifdef CONFIG_NET_DMA |
691 | struct dma_chan *net_dma; | 815 | struct dma_chan *net_dma; |
692 | #endif | 816 | #endif |
@@ -704,11 +828,24 @@ static inline void netif_schedule(struct net_device *dev) | |||
704 | __netif_schedule(dev); | 828 | __netif_schedule(dev); |
705 | } | 829 | } |
706 | 830 | ||
831 | /** | ||
832 | * netif_start_queue - allow transmit | ||
833 | * @dev: network device | ||
834 | * | ||
835 | * Allow upper layers to call the device hard_start_xmit routine. | ||
836 | */ | ||
707 | static inline void netif_start_queue(struct net_device *dev) | 837 | static inline void netif_start_queue(struct net_device *dev) |
708 | { | 838 | { |
709 | clear_bit(__LINK_STATE_XOFF, &dev->state); | 839 | clear_bit(__LINK_STATE_XOFF, &dev->state); |
710 | } | 840 | } |
711 | 841 | ||
842 | /** | ||
843 | * netif_wake_queue - restart transmit | ||
844 | * @dev: network device | ||
845 | * | ||
846 | * Allow upper layers to call the device hard_start_xmit routine. | ||
847 | * Used for flow control when transmit resources are available. | ||
848 | */ | ||
712 | static inline void netif_wake_queue(struct net_device *dev) | 849 | static inline void netif_wake_queue(struct net_device *dev) |
713 | { | 850 | { |
714 | #ifdef CONFIG_NETPOLL_TRAP | 851 | #ifdef CONFIG_NETPOLL_TRAP |
@@ -721,16 +858,35 @@ static inline void netif_wake_queue(struct net_device *dev) | |||
721 | __netif_schedule(dev); | 858 | __netif_schedule(dev); |
722 | } | 859 | } |
723 | 860 | ||
861 | /** | ||
862 | * netif_stop_queue - stop transmitting packets | ||
863 | * @dev: network device | ||
864 | * | ||
865 | * Stop upper layers calling the device hard_start_xmit routine. | ||
866 | * Used for flow control when transmit resources are unavailable. | ||
867 | */ | ||
724 | static inline void netif_stop_queue(struct net_device *dev) | 868 | static inline void netif_stop_queue(struct net_device *dev) |
725 | { | 869 | { |
726 | set_bit(__LINK_STATE_XOFF, &dev->state); | 870 | set_bit(__LINK_STATE_XOFF, &dev->state); |
727 | } | 871 | } |
728 | 872 | ||
873 | /** | ||
874 | * netif_queue_stopped - test if transmit queue is flowblocked | ||
875 | * @dev: network device | ||
876 | * | ||
877 | * Test if transmit queue on device is currently unable to send. | ||
878 | */ | ||
729 | static inline int netif_queue_stopped(const struct net_device *dev) | 879 | static inline int netif_queue_stopped(const struct net_device *dev) |
730 | { | 880 | { |
731 | return test_bit(__LINK_STATE_XOFF, &dev->state); | 881 | return test_bit(__LINK_STATE_XOFF, &dev->state); |
732 | } | 882 | } |
733 | 883 | ||
884 | /** | ||
885 | * netif_running - test if up | ||
886 | * @dev: network device | ||
887 | * | ||
888 | * Test if the device has been brought up. | ||
889 | */ | ||
734 | static inline int netif_running(const struct net_device *dev) | 890 | static inline int netif_running(const struct net_device *dev) |
735 | { | 891 | { |
736 | return test_bit(__LINK_STATE_START, &dev->state); | 892 | return test_bit(__LINK_STATE_START, &dev->state); |
@@ -742,6 +898,14 @@ static inline int netif_running(const struct net_device *dev) | |||
742 | * done at the overall netdevice level. | 898 | * done at the overall netdevice level. |
743 | * Also test the device if we're multiqueue. | 899 | * Also test the device if we're multiqueue. |
744 | */ | 900 | */ |
901 | |||
902 | /** | ||
903 | * netif_start_subqueue - allow sending packets on subqueue | ||
904 | * @dev: network device | ||
905 | * @queue_index: sub queue index | ||
906 | * | ||
907 | * Start individual transmit queue of a device with multiple transmit queues. | ||
908 | */ | ||
745 | static inline void netif_start_subqueue(struct net_device *dev, u16 queue_index) | 909 | static inline void netif_start_subqueue(struct net_device *dev, u16 queue_index) |
746 | { | 910 | { |
747 | #ifdef CONFIG_NETDEVICES_MULTIQUEUE | 911 | #ifdef CONFIG_NETDEVICES_MULTIQUEUE |
@@ -749,6 +913,13 @@ static inline void netif_start_subqueue(struct net_device *dev, u16 queue_index) | |||
749 | #endif | 913 | #endif |
750 | } | 914 | } |
751 | 915 | ||
916 | /** | ||
917 | * netif_stop_subqueue - stop sending packets on subqueue | ||
918 | * @dev: network device | ||
919 | * @queue_index: sub queue index | ||
920 | * | ||
921 | * Stop individual transmit queue of a device with multiple transmit queues. | ||
922 | */ | ||
752 | static inline void netif_stop_subqueue(struct net_device *dev, u16 queue_index) | 923 | static inline void netif_stop_subqueue(struct net_device *dev, u16 queue_index) |
753 | { | 924 | { |
754 | #ifdef CONFIG_NETDEVICES_MULTIQUEUE | 925 | #ifdef CONFIG_NETDEVICES_MULTIQUEUE |
@@ -760,6 +931,13 @@ static inline void netif_stop_subqueue(struct net_device *dev, u16 queue_index) | |||
760 | #endif | 931 | #endif |
761 | } | 932 | } |
762 | 933 | ||
934 | /** | ||
935 | * netif_subqueue_stopped - test status of subqueue | ||
936 | * @dev: network device | ||
937 | * @queue_index: sub queue index | ||
938 | * | ||
939 | * Check individual transmit queue of a device with multiple transmit queues. | ||
940 | */ | ||
763 | static inline int netif_subqueue_stopped(const struct net_device *dev, | 941 | static inline int netif_subqueue_stopped(const struct net_device *dev, |
764 | u16 queue_index) | 942 | u16 queue_index) |
765 | { | 943 | { |
@@ -771,6 +949,14 @@ static inline int netif_subqueue_stopped(const struct net_device *dev, | |||
771 | #endif | 949 | #endif |
772 | } | 950 | } |
773 | 951 | ||
952 | |||
953 | /** | ||
954 | * netif_wake_subqueue - allow sending packets on subqueue | ||
955 | * @dev: network device | ||
956 | * @queue_index: sub queue index | ||
957 | * | ||
958 | * Resume individual transmit queue of a device with multiple transmit queues. | ||
959 | */ | ||
774 | static inline void netif_wake_subqueue(struct net_device *dev, u16 queue_index) | 960 | static inline void netif_wake_subqueue(struct net_device *dev, u16 queue_index) |
775 | { | 961 | { |
776 | #ifdef CONFIG_NETDEVICES_MULTIQUEUE | 962 | #ifdef CONFIG_NETDEVICES_MULTIQUEUE |
@@ -784,6 +970,13 @@ static inline void netif_wake_subqueue(struct net_device *dev, u16 queue_index) | |||
784 | #endif | 970 | #endif |
785 | } | 971 | } |
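
Taken together, the subqueue helpers mirror the single-queue start/stop/wake
pattern per TX ring. A hedged sketch of a multiqueue TX-completion path
(my_ring_has_room() is an invented helper; the xmit path is assumed to have
called netif_stop_subqueue() when the ring filled):

static void my_tx_complete(struct net_device *dev, u16 queue)
{
	/* ... reclaim finished descriptors for this ring ... */

	if (netif_subqueue_stopped(dev, queue) &&
	    my_ring_has_room(dev, queue))
		netif_wake_subqueue(dev, queue);
}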
786 | 972 | ||
973 | /** | ||
974 | * netif_is_multiqueue - test if device has multiple transmit queues | ||
975 | * @dev: network device | ||
976 | * | ||
977 | * Check if device has multiple transmit queues. | ||
978 | * Always returns false if CONFIG_NETDEVICES_MULTIQUEUE is not configured. | ||
979 | */ | ||
787 | static inline int netif_is_multiqueue(const struct net_device *dev) | 980 | static inline int netif_is_multiqueue(const struct net_device *dev) |
788 | { | 981 | { |
789 | #ifdef CONFIG_NETDEVICES_MULTIQUEUE | 982 | #ifdef CONFIG_NETDEVICES_MULTIQUEUE |
@@ -796,20 +989,7 @@ static inline int netif_is_multiqueue(const struct net_device *dev) | |||
796 | /* Use this variant when it is known for sure that it | 989 | /* Use this variant when it is known for sure that it |
797 | * is executing from interrupt context. | 990 | * is executing from interrupt context. |
798 | */ | 991 | */ |
799 | static inline void dev_kfree_skb_irq(struct sk_buff *skb) | 992 | extern void dev_kfree_skb_irq(struct sk_buff *skb); |
800 | { | ||
801 | if (atomic_dec_and_test(&skb->users)) { | ||
802 | struct softnet_data *sd; | ||
803 | unsigned long flags; | ||
804 | |||
805 | local_irq_save(flags); | ||
806 | sd = &__get_cpu_var(softnet_data); | ||
807 | skb->next = sd->completion_queue; | ||
808 | sd->completion_queue = skb; | ||
809 | raise_softirq_irqoff(NET_TX_SOFTIRQ); | ||
810 | local_irq_restore(flags); | ||
811 | } | ||
812 | } | ||
813 | 993 | ||
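
The body of dev_kfree_skb_irq() has simply moved out of line to
net/core/dev.c (visible later in this patch); callers are unchanged. A sketch
of the typical call site, a TX-completion handler running in hard-IRQ context
(my_next_done_skb() is an invented ring-walking helper):

static void my_clean_tx_irq(struct my_priv *priv)
{
	struct sk_buff *skb;

	while ((skb = my_next_done_skb(priv)) != NULL)
		dev_kfree_skb_irq(skb);	/* defers the free to NET_TX softirq */
}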
814 | /* Use this variant in places where it could be invoked | 994 | /* Use this variant in places where it could be invoked |
815 | * either from interrupt or non-interrupt context. | 995 | * either from interrupt or non-interrupt context. |
@@ -833,18 +1013,28 @@ extern int dev_set_mac_address(struct net_device *, | |||
833 | extern int dev_hard_start_xmit(struct sk_buff *skb, | 1013 | extern int dev_hard_start_xmit(struct sk_buff *skb, |
834 | struct net_device *dev); | 1014 | struct net_device *dev); |
835 | 1015 | ||
836 | extern void dev_init(void); | ||
837 | |||
838 | extern int netdev_budget; | 1016 | extern int netdev_budget; |
839 | 1017 | ||
840 | /* Called by rtnetlink.c:rtnl_unlock() */ | 1018 | /* Called by rtnetlink.c:rtnl_unlock() */ |
841 | extern void netdev_run_todo(void); | 1019 | extern void netdev_run_todo(void); |
842 | 1020 | ||
1021 | /** | ||
1022 | * dev_put - release reference to device | ||
1023 | * @dev: network device | ||
1024 | * | ||
1025 | * Release reference to device to allow it to be freed. | ||
1026 | */ | ||
843 | static inline void dev_put(struct net_device *dev) | 1027 | static inline void dev_put(struct net_device *dev) |
844 | { | 1028 | { |
845 | atomic_dec(&dev->refcnt); | 1029 | atomic_dec(&dev->refcnt); |
846 | } | 1030 | } |
847 | 1031 | ||
1032 | /** | ||
1033 | * dev_hold - get reference to device | ||
1034 | * @dev: network device | ||
1035 | * | ||
1036 | * Hold reference to device to keep it from being freed. | ||
1037 | */ | ||
848 | static inline void dev_hold(struct net_device *dev) | 1038 | static inline void dev_hold(struct net_device *dev) |
849 | { | 1039 | { |
850 | atomic_inc(&dev->refcnt); | 1040 | atomic_inc(&dev->refcnt); |
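
dev_hold() and dev_put() pair across any context that uses the device
asynchronously: take the reference before handing the device off, drop it
when that context is done. A sketch (the work-queue plumbing is invented):

static void my_start_reset(struct my_priv *priv)
{
	dev_hold(priv->dev);		/* pin device across the async work */
	schedule_work(&priv->reset_task);
}

static void my_reset_task(struct work_struct *work)
{
	struct my_priv *priv = container_of(work, struct my_priv, reset_task);

	/* ... safe to touch priv->dev here ... */
	dev_put(priv->dev);		/* unpin; device may now be freed */
}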
@@ -861,6 +1051,12 @@ static inline void dev_hold(struct net_device *dev) | |||
861 | 1051 | ||
862 | extern void linkwatch_fire_event(struct net_device *dev); | 1052 | extern void linkwatch_fire_event(struct net_device *dev); |
863 | 1053 | ||
1054 | /** | ||
1055 | * netif_carrier_ok - test if carrier present | ||
1056 | * @dev: network device | ||
1057 | * | ||
1058 | * Check if carrier is present on device | ||
1059 | */ | ||
864 | static inline int netif_carrier_ok(const struct net_device *dev) | 1060 | static inline int netif_carrier_ok(const struct net_device *dev) |
865 | { | 1061 | { |
866 | return !test_bit(__LINK_STATE_NOCARRIER, &dev->state); | 1062 | return !test_bit(__LINK_STATE_NOCARRIER, &dev->state); |
@@ -872,30 +1068,66 @@ extern void netif_carrier_on(struct net_device *dev); | |||
872 | 1068 | ||
873 | extern void netif_carrier_off(struct net_device *dev); | 1069 | extern void netif_carrier_off(struct net_device *dev); |
874 | 1070 | ||
1071 | /** | ||
1072 | * netif_dormant_on - mark device as dormant. | ||
1073 | * @dev: network device | ||
1074 | * | ||
1075 | * Mark device as dormant (as per RFC2863). | ||
1076 | * | ||
1077 | * The dormant state indicates that the relevant interface is not | ||
1078 | * actually in a condition to pass packets (i.e., it is not 'up') but is | ||
1079 | * in a "pending" state, waiting for some external event. For "on- | ||
1080 | * demand" interfaces, this new state identifies the situation where the | ||
1081 | * interface is waiting for events to place it in the up state. | ||
1082 | * | ||
1083 | */ | ||
875 | static inline void netif_dormant_on(struct net_device *dev) | 1084 | static inline void netif_dormant_on(struct net_device *dev) |
876 | { | 1085 | { |
877 | if (!test_and_set_bit(__LINK_STATE_DORMANT, &dev->state)) | 1086 | if (!test_and_set_bit(__LINK_STATE_DORMANT, &dev->state)) |
878 | linkwatch_fire_event(dev); | 1087 | linkwatch_fire_event(dev); |
879 | } | 1088 | } |
880 | 1089 | ||
1090 | /** | ||
1091 | * netif_dormant_off - set device as not dormant. | ||
1092 | * @dev: network device | ||
1093 | * | ||
1094 | * Mark device as no longer dormant. | ||
1095 | */ | ||
881 | static inline void netif_dormant_off(struct net_device *dev) | 1096 | static inline void netif_dormant_off(struct net_device *dev) |
882 | { | 1097 | { |
883 | if (test_and_clear_bit(__LINK_STATE_DORMANT, &dev->state)) | 1098 | if (test_and_clear_bit(__LINK_STATE_DORMANT, &dev->state)) |
884 | linkwatch_fire_event(dev); | 1099 | linkwatch_fire_event(dev); |
885 | } | 1100 | } |
886 | 1101 | ||
1102 | /** | ||
1103 | * netif_dormant - test if device is dormant | ||
1104 | * @dev: network device | ||
1105 | * | ||
1106 | * Check if the device is in dormant state. | ||
1107 | */ | ||
887 | static inline int netif_dormant(const struct net_device *dev) | 1108 | static inline int netif_dormant(const struct net_device *dev) |
888 | { | 1109 | { |
889 | return test_bit(__LINK_STATE_DORMANT, &dev->state); | 1110 | return test_bit(__LINK_STATE_DORMANT, &dev->state); |
890 | } | 1111 | } |
891 | 1112 | ||
892 | 1113 | ||
1114 | /** | ||
1115 | * netif_oper_up - test if device is operational | ||
1116 | * @dev: network device | ||
1117 | * | ||
1118 | * Check if the device's operational state is up. | ||
1119 | */ | ||
893 | static inline int netif_oper_up(const struct net_device *dev) { | 1120 | static inline int netif_oper_up(const struct net_device *dev) { |
894 | return (dev->operstate == IF_OPER_UP || | 1121 | return (dev->operstate == IF_OPER_UP || |
895 | dev->operstate == IF_OPER_UNKNOWN /* backward compat */); | 1122 | dev->operstate == IF_OPER_UNKNOWN /* backward compat */); |
896 | } | 1123 | } |
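
netif_carrier_on()/netif_carrier_off() (declared above and defined in
net/sched/sch_generic.c later in this patch) are what a driver's link-change
path calls; the dormant helpers cover the RFC2863 "pending" case, and the
operational state is derived from these bits by the linkwatch machinery. A
minimal sketch (link detection is assumed to exist elsewhere):

static void my_link_change(struct net_device *dev, int link_up)
{
	if (link_up) {
		netif_carrier_on(dev);		/* operstate moves towards UP */
		netif_wake_queue(dev);
	} else {
		netif_carrier_off(dev);		/* operstate moves to DOWN */
		netif_stop_queue(dev);
	}
}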
897 | 1124 | ||
898 | /* Hot-plugging. */ | 1125 | /** |
1126 | * netif_device_present - is device available or removed | ||
1127 | * @dev: network device | ||
1128 | * | ||
1129 | * Check if device has not been removed from system. | ||
1130 | */ | ||
899 | static inline int netif_device_present(struct net_device *dev) | 1131 | static inline int netif_device_present(struct net_device *dev) |
900 | { | 1132 | { |
901 | return test_bit(__LINK_STATE_PRESENT, &dev->state); | 1133 | return test_bit(__LINK_STATE_PRESENT, &dev->state); |
@@ -955,46 +1187,38 @@ static inline u32 netif_msg_init(int debug_value, int default_msg_enable_bits) | |||
955 | return (1 << debug_value) - 1; | 1187 | return (1 << debug_value) - 1; |
956 | } | 1188 | } |
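
The visible return statement converts a small numeric verbosity level into a
mask of the lowest NETIF_MSG_* bits. A worked example of that in-range branch
(debug and priv are assumed driver-local):

/* debug = 4  =>  (1 << 4) - 1 = 0x0f, i.e. NETIF_MSG_DRV |
 * NETIF_MSG_PROBE | NETIF_MSG_LINK | NETIF_MSG_TIMER. */
priv->msg_enable = netif_msg_init(debug, NETIF_MSG_DRV | NETIF_MSG_LINK);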
957 | 1189 | ||
958 | /* Test if receive needs to be scheduled */ | ||
959 | static inline int __netif_rx_schedule_prep(struct net_device *dev) | ||
960 | { | ||
961 | return !test_and_set_bit(__LINK_STATE_RX_SCHED, &dev->state); | ||
962 | } | ||
963 | |||
964 | /* Test if receive needs to be scheduled but only if up */ | 1190 | /* Test if receive needs to be scheduled but only if up */ |
965 | static inline int netif_rx_schedule_prep(struct net_device *dev) | 1191 | static inline int netif_rx_schedule_prep(struct net_device *dev, |
1192 | struct napi_struct *napi) | ||
966 | { | 1193 | { |
967 | return netif_running(dev) && __netif_rx_schedule_prep(dev); | 1194 | return netif_running(dev) && napi_schedule_prep(napi); |
968 | } | 1195 | } |
969 | 1196 | ||
970 | /* Add interface to tail of rx poll list. This assumes that _prep has | 1197 | /* Add interface to tail of rx poll list. This assumes that _prep has |
971 | * already been called and returned 1. | 1198 | * already been called and returned 1. |
972 | */ | 1199 | */ |
973 | 1200 | static inline void __netif_rx_schedule(struct net_device *dev, | |
974 | extern void __netif_rx_schedule(struct net_device *dev); | 1201 | struct napi_struct *napi) |
1202 | { | ||
1203 | dev_hold(dev); | ||
1204 | __napi_schedule(napi); | ||
1205 | } | ||
975 | 1206 | ||
976 | /* Try to reschedule poll. Called by irq handler. */ | 1207 | /* Try to reschedule poll. Called by irq handler. */ |
977 | 1208 | ||
978 | static inline void netif_rx_schedule(struct net_device *dev) | 1209 | static inline void netif_rx_schedule(struct net_device *dev, |
1210 | struct napi_struct *napi) | ||
979 | { | 1211 | { |
980 | if (netif_rx_schedule_prep(dev)) | 1212 | if (netif_rx_schedule_prep(dev, napi)) |
981 | __netif_rx_schedule(dev); | 1213 | __netif_rx_schedule(dev, napi); |
982 | } | 1214 | } |
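
Under the reworked API the interrupt handler passes both the device and its
napi_struct. A sketch of a converted handler (priv->napi is assumed to have
been registered with netif_napi_add() elsewhere in this patch; the
IRQ-masking helpers are invented):

static irqreturn_t my_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct my_priv *priv = netdev_priv(dev);

	if (my_rx_pending(priv)) {
		my_mask_rx_irq(priv);			/* quiesce RX interrupts */
		netif_rx_schedule(dev, &priv->napi);	/* queue ->poll() */
	}
	return IRQ_HANDLED;
}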
983 | 1215 | ||
984 | /* Try to reschedule poll. Called by dev->poll() after netif_rx_complete(). | 1216 | /* Try to reschedule poll. Called by dev->poll() after netif_rx_complete(). */ |
985 | * Do not inline this? | 1217 | static inline int netif_rx_reschedule(struct net_device *dev, |
986 | */ | 1218 | struct napi_struct *napi) |
987 | static inline int netif_rx_reschedule(struct net_device *dev, int undo) | ||
988 | { | 1219 | { |
989 | if (netif_rx_schedule_prep(dev)) { | 1220 | if (napi_schedule_prep(napi)) { |
990 | unsigned long flags; | 1221 | __netif_rx_schedule(dev, napi); |
991 | |||
992 | dev->quota += undo; | ||
993 | |||
994 | local_irq_save(flags); | ||
995 | list_add_tail(&dev->poll_list, &__get_cpu_var(softnet_data).poll_list); | ||
996 | __raise_softirq_irqoff(NET_RX_SOFTIRQ); | ||
997 | local_irq_restore(flags); | ||
998 | return 1; | 1222 | return 1; |
999 | } | 1223 | } |
1000 | return 0; | 1224 | return 0; |
@@ -1003,12 +1227,11 @@ static inline int netif_rx_reschedule(struct net_device *dev, int undo) | |||
1003 | /* same as netif_rx_complete, except that local_irq_save(flags) | 1227 | /* same as netif_rx_complete, except that local_irq_save(flags) |
1004 | * has already been issued | 1228 | * has already been issued |
1005 | */ | 1229 | */ |
1006 | static inline void __netif_rx_complete(struct net_device *dev) | 1230 | static inline void __netif_rx_complete(struct net_device *dev, |
1231 | struct napi_struct *napi) | ||
1007 | { | 1232 | { |
1008 | BUG_ON(!test_bit(__LINK_STATE_RX_SCHED, &dev->state)); | 1233 | __napi_complete(napi); |
1009 | list_del(&dev->poll_list); | 1234 | dev_put(dev); |
1010 | smp_mb__before_clear_bit(); | ||
1011 | clear_bit(__LINK_STATE_RX_SCHED, &dev->state); | ||
1012 | } | 1235 | } |
1013 | 1236 | ||
1014 | /* Remove interface from poll list: it must be in the poll list | 1237 | /* Remove interface from poll list: it must be in the poll list |
@@ -1016,28 +1239,22 @@ static inline void __netif_rx_complete(struct net_device *dev) | |||
1016 | * it completes the work. The device cannot be out of poll list at this | 1239 | * it completes the work. The device cannot be out of poll list at this |
1017 | * moment, it is BUG(). | 1240 | * moment, it is BUG(). |
1018 | */ | 1241 | */ |
1019 | static inline void netif_rx_complete(struct net_device *dev) | 1242 | static inline void netif_rx_complete(struct net_device *dev, |
1243 | struct napi_struct *napi) | ||
1020 | { | 1244 | { |
1021 | unsigned long flags; | 1245 | unsigned long flags; |
1022 | 1246 | ||
1023 | local_irq_save(flags); | 1247 | local_irq_save(flags); |
1024 | __netif_rx_complete(dev); | 1248 | __netif_rx_complete(dev, napi); |
1025 | local_irq_restore(flags); | 1249 | local_irq_restore(flags); |
1026 | } | 1250 | } |
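
The matching ->poll() under the new signature returns how much work it did
and completes only when it under-ran its budget; that keeps it consistent
with the "do not touch NAPI state when the whole weight was consumed" rule
enforced in net_rx_action() below. A sketch (the ring helpers are invented):

static int my_poll(struct napi_struct *napi, int budget)
{
	struct my_priv *priv = container_of(napi, struct my_priv, napi);
	int work = my_clean_rx_ring(priv, budget);

	if (work < budget) {
		netif_rx_complete(priv->dev, napi);	/* off the poll list */
		my_unmask_rx_irq(priv);			/* re-arm RX interrupts */
	}
	return work;
}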
1027 | 1251 | ||
1028 | static inline void netif_poll_disable(struct net_device *dev) | 1252 | /** |
1029 | { | 1253 | * netif_tx_lock - grab network device transmit lock |
1030 | while (test_and_set_bit(__LINK_STATE_RX_SCHED, &dev->state)) | 1254 | * @dev: network device |
1031 | /* No hurry. */ | 1255 | * |
1032 | schedule_timeout_interruptible(1); | 1256 | * Get network device transmit lock |
1033 | } | 1257 | */ |
1034 | |||
1035 | static inline void netif_poll_enable(struct net_device *dev) | ||
1036 | { | ||
1037 | smp_mb__before_clear_bit(); | ||
1038 | clear_bit(__LINK_STATE_RX_SCHED, &dev->state); | ||
1039 | } | ||
1040 | |||
1041 | static inline void netif_tx_lock(struct net_device *dev) | 1258 | static inline void netif_tx_lock(struct net_device *dev) |
1042 | { | 1259 | { |
1043 | spin_lock(&dev->_xmit_lock); | 1260 | spin_lock(&dev->_xmit_lock); |
diff --git a/include/linux/netpoll.h b/include/linux/netpoll.h index 29930b71a9aa..08dcc39ec18d 100644 --- a/include/linux/netpoll.h +++ b/include/linux/netpoll.h | |||
@@ -25,8 +25,6 @@ struct netpoll { | |||
25 | 25 | ||
26 | struct netpoll_info { | 26 | struct netpoll_info { |
27 | atomic_t refcnt; | 27 | atomic_t refcnt; |
28 | spinlock_t poll_lock; | ||
29 | int poll_owner; | ||
30 | int rx_flags; | 28 | int rx_flags; |
31 | spinlock_t rx_lock; | 29 | spinlock_t rx_lock; |
32 | struct netpoll *rx_np; /* netpoll that registered an rx_hook */ | 30 | struct netpoll *rx_np; /* netpoll that registered an rx_hook */ |
@@ -64,32 +62,61 @@ static inline int netpoll_rx(struct sk_buff *skb) | |||
64 | return ret; | 62 | return ret; |
65 | } | 63 | } |
66 | 64 | ||
67 | static inline void *netpoll_poll_lock(struct net_device *dev) | 65 | static inline int netpoll_receive_skb(struct sk_buff *skb) |
68 | { | 66 | { |
67 | if (!list_empty(&skb->dev->napi_list)) | ||
68 | return netpoll_rx(skb); | ||
69 | return 0; | ||
70 | } | ||
71 | |||
72 | static inline void *netpoll_poll_lock(struct napi_struct *napi) | ||
73 | { | ||
74 | struct net_device *dev = napi->dev; | ||
75 | |||
69 | rcu_read_lock(); /* deal with race on ->npinfo */ | 76 | rcu_read_lock(); /* deal with race on ->npinfo */ |
70 | if (dev->npinfo) { | 77 | if (dev && dev->npinfo) { |
71 | spin_lock(&dev->npinfo->poll_lock); | 78 | spin_lock(&napi->poll_lock); |
72 | dev->npinfo->poll_owner = smp_processor_id(); | 79 | napi->poll_owner = smp_processor_id(); |
73 | return dev->npinfo; | 80 | return napi; |
74 | } | 81 | } |
75 | return NULL; | 82 | return NULL; |
76 | } | 83 | } |
77 | 84 | ||
78 | static inline void netpoll_poll_unlock(void *have) | 85 | static inline void netpoll_poll_unlock(void *have) |
79 | { | 86 | { |
80 | struct netpoll_info *npi = have; | 87 | struct napi_struct *napi = have; |
81 | 88 | ||
82 | if (npi) { | 89 | if (napi) { |
83 | npi->poll_owner = -1; | 90 | napi->poll_owner = -1; |
84 | spin_unlock(&npi->poll_lock); | 91 | spin_unlock(&napi->poll_lock); |
85 | } | 92 | } |
86 | rcu_read_unlock(); | 93 | rcu_read_unlock(); |
87 | } | 94 | } |
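
The poll lock now lives in each napi_struct rather than in the per-device
netpoll_info. The caller-side pattern, as net_rx_action() uses it further
down (a sketch, not the exact core code):

static int my_do_poll(struct napi_struct *n, int weight)
{
	void *have = netpoll_poll_lock(n);	/* NULL unless netpoll is armed */
	int work = n->poll(n, weight);		/* poll_owner records this cpu */

	netpoll_poll_unlock(have);
	return work;
}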
88 | 95 | ||
96 | static inline void netpoll_netdev_init(struct net_device *dev) | ||
97 | { | ||
98 | INIT_LIST_HEAD(&dev->napi_list); | ||
99 | } | ||
100 | |||
89 | #else | 101 | #else |
90 | #define netpoll_rx(a) 0 | 102 | static inline int netpoll_rx(struct sk_buff *skb) |
91 | #define netpoll_poll_lock(a) NULL | 103 | { |
92 | #define netpoll_poll_unlock(a) | 104 | return 0; |
105 | } | ||
106 | static inline int netpoll_receive_skb(struct sk_buff *skb) | ||
107 | { | ||
108 | return 0; | ||
109 | } | ||
110 | static inline void *netpoll_poll_lock(struct napi_struct *napi) | ||
111 | { | ||
112 | return NULL; | ||
113 | } | ||
114 | static inline void netpoll_poll_unlock(void *have) | ||
115 | { | ||
116 | } | ||
117 | static inline void netpoll_netdev_init(struct net_device *dev) | ||
118 | { | ||
119 | } | ||
93 | #endif | 120 | #endif |
94 | 121 | ||
95 | #endif | 122 | #endif |
diff --git a/net/core/dev.c b/net/core/dev.c index a76021c71207..29cf00c5d865 100644 --- a/net/core/dev.c +++ b/net/core/dev.c | |||
@@ -220,7 +220,8 @@ static RAW_NOTIFIER_HEAD(netdev_chain); | |||
220 | * Device drivers call our routines to queue packets here. We empty the | 220 | * Device drivers call our routines to queue packets here. We empty the |
221 | * queue in the local softnet handler. | 221 | * queue in the local softnet handler. |
222 | */ | 222 | */ |
223 | DEFINE_PER_CPU(struct softnet_data, softnet_data) = { NULL }; | 223 | |
224 | DEFINE_PER_CPU(struct softnet_data, softnet_data); | ||
224 | 225 | ||
225 | #ifdef CONFIG_SYSFS | 226 | #ifdef CONFIG_SYSFS |
226 | extern int netdev_sysfs_init(void); | 227 | extern int netdev_sysfs_init(void); |
@@ -1018,16 +1019,12 @@ int dev_close(struct net_device *dev) | |||
1018 | clear_bit(__LINK_STATE_START, &dev->state); | 1019 | clear_bit(__LINK_STATE_START, &dev->state); |
1019 | 1020 | ||
1020 | /* Synchronize to scheduled poll. We cannot touch poll list, | 1021 | /* Synchronize to scheduled poll. We cannot touch poll list, |
1021 | * it can be even on different cpu. So just clear netif_running(), | 1022 | * it can even be on a different cpu. So just clear netif_running(). |
1022 | * and wait when poll really will happen. Actually, the best place | 1023 | * |
1023 | * for this is inside dev->stop() after device stopped its irq | 1024 | * dev->stop() will invoke napi_disable() on all of its |
1024 | * engine, but this requires more changes in devices. */ | 1025 | * napi_struct instances on this device. |
1025 | 1026 | */ | |
1026 | smp_mb__after_clear_bit(); /* Commit netif_running(). */ | 1027 | smp_mb__after_clear_bit(); /* Commit netif_running(). */ |
1027 | while (test_bit(__LINK_STATE_RX_SCHED, &dev->state)) { | ||
1028 | /* No hurry. */ | ||
1029 | msleep(1); | ||
1030 | } | ||
1031 | 1028 | ||
1032 | /* | 1029 | /* |
1033 | * Call the device specific close. This cannot fail. | 1030 | * Call the device specific close. This cannot fail. |
@@ -1233,21 +1230,21 @@ void __netif_schedule(struct net_device *dev) | |||
1233 | } | 1230 | } |
1234 | EXPORT_SYMBOL(__netif_schedule); | 1231 | EXPORT_SYMBOL(__netif_schedule); |
1235 | 1232 | ||
1236 | void __netif_rx_schedule(struct net_device *dev) | 1233 | void dev_kfree_skb_irq(struct sk_buff *skb) |
1237 | { | 1234 | { |
1238 | unsigned long flags; | 1235 | if (atomic_dec_and_test(&skb->users)) { |
1236 | struct softnet_data *sd; | ||
1237 | unsigned long flags; | ||
1239 | 1238 | ||
1240 | local_irq_save(flags); | 1239 | local_irq_save(flags); |
1241 | dev_hold(dev); | 1240 | sd = &__get_cpu_var(softnet_data); |
1242 | list_add_tail(&dev->poll_list, &__get_cpu_var(softnet_data).poll_list); | 1241 | skb->next = sd->completion_queue; |
1243 | if (dev->quota < 0) | 1242 | sd->completion_queue = skb; |
1244 | dev->quota += dev->weight; | 1243 | raise_softirq_irqoff(NET_TX_SOFTIRQ); |
1245 | else | 1244 | local_irq_restore(flags); |
1246 | dev->quota = dev->weight; | 1245 | } |
1247 | __raise_softirq_irqoff(NET_RX_SOFTIRQ); | ||
1248 | local_irq_restore(flags); | ||
1249 | } | 1246 | } |
1250 | EXPORT_SYMBOL(__netif_rx_schedule); | 1247 | EXPORT_SYMBOL(dev_kfree_skb_irq); |
1251 | 1248 | ||
1252 | void dev_kfree_skb_any(struct sk_buff *skb) | 1249 | void dev_kfree_skb_any(struct sk_buff *skb) |
1253 | { | 1250 | { |
@@ -1259,7 +1256,12 @@ void dev_kfree_skb_any(struct sk_buff *skb) | |||
1259 | EXPORT_SYMBOL(dev_kfree_skb_any); | 1256 | EXPORT_SYMBOL(dev_kfree_skb_any); |
1260 | 1257 | ||
1261 | 1258 | ||
1262 | /* Hot-plugging. */ | 1259 | /** |
1260 | * netif_device_detach - mark device as removed | ||
1261 | * @dev: network device | ||
1262 | * | ||
1263 | * Mark device as removed from system and therefore no longer available. | ||
1264 | */ | ||
1263 | void netif_device_detach(struct net_device *dev) | 1265 | void netif_device_detach(struct net_device *dev) |
1264 | { | 1266 | { |
1265 | if (test_and_clear_bit(__LINK_STATE_PRESENT, &dev->state) && | 1267 | if (test_and_clear_bit(__LINK_STATE_PRESENT, &dev->state) && |
@@ -1269,6 +1271,12 @@ void netif_device_detach(struct net_device *dev) | |||
1269 | } | 1271 | } |
1270 | EXPORT_SYMBOL(netif_device_detach); | 1272 | EXPORT_SYMBOL(netif_device_detach); |
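
These two form the usual suspend/resume pairing in drivers. A sketch with the
PCI power-management callbacks of this kernel generation (chip-specific
save/restore elided):

static int my_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	netif_device_detach(dev);	/* stops the queue if device was up */
	/* ... save chip state, power down ... */
	return 0;
}

static int my_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	/* ... power up, restore chip state ... */
	netif_device_attach(dev);	/* restarts the queue if device was up */
	return 0;
}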
1271 | 1273 | ||
1274 | /** | ||
1275 | * netif_device_attach - mark device as attached | ||
1276 | * @dev: network device | ||
1277 | * | ||
1278 | * Mark device as attached to the system and restart it if needed. | ||
1279 | */ | ||
1272 | void netif_device_attach(struct net_device *dev) | 1280 | void netif_device_attach(struct net_device *dev) |
1273 | { | 1281 | { |
1274 | if (!test_and_set_bit(__LINK_STATE_PRESENT, &dev->state) && | 1282 | if (!test_and_set_bit(__LINK_STATE_PRESENT, &dev->state) && |
@@ -1730,7 +1738,7 @@ enqueue: | |||
1730 | return NET_RX_SUCCESS; | 1738 | return NET_RX_SUCCESS; |
1731 | } | 1739 | } |
1732 | 1740 | ||
1733 | netif_rx_schedule(&queue->backlog_dev); | 1741 | napi_schedule(&queue->backlog); |
1734 | goto enqueue; | 1742 | goto enqueue; |
1735 | } | 1743 | } |
1736 | 1744 | ||
@@ -1771,6 +1779,7 @@ static inline struct net_device *skb_bond(struct sk_buff *skb) | |||
1771 | return dev; | 1779 | return dev; |
1772 | } | 1780 | } |
1773 | 1781 | ||
1782 | |||
1774 | static void net_tx_action(struct softirq_action *h) | 1783 | static void net_tx_action(struct softirq_action *h) |
1775 | { | 1784 | { |
1776 | struct softnet_data *sd = &__get_cpu_var(softnet_data); | 1785 | struct softnet_data *sd = &__get_cpu_var(softnet_data); |
@@ -1927,7 +1936,7 @@ int netif_receive_skb(struct sk_buff *skb) | |||
1927 | __be16 type; | 1936 | __be16 type; |
1928 | 1937 | ||
1929 | /* if we've gotten here through NAPI, check netpoll */ | 1938 | /* if we've gotten here through NAPI, check netpoll */ |
1930 | if (skb->dev->poll && netpoll_rx(skb)) | 1939 | if (netpoll_receive_skb(skb)) |
1931 | return NET_RX_DROP; | 1940 | return NET_RX_DROP; |
1932 | 1941 | ||
1933 | if (!skb->tstamp.tv64) | 1942 | if (!skb->tstamp.tv64) |
@@ -2017,22 +2026,25 @@ out: | |||
2017 | return ret; | 2026 | return ret; |
2018 | } | 2027 | } |
2019 | 2028 | ||
2020 | static int process_backlog(struct net_device *backlog_dev, int *budget) | 2029 | static int process_backlog(struct napi_struct *napi, int quota) |
2021 | { | 2030 | { |
2022 | int work = 0; | 2031 | int work = 0; |
2023 | int quota = min(backlog_dev->quota, *budget); | ||
2024 | struct softnet_data *queue = &__get_cpu_var(softnet_data); | 2032 | struct softnet_data *queue = &__get_cpu_var(softnet_data); |
2025 | unsigned long start_time = jiffies; | 2033 | unsigned long start_time = jiffies; |
2026 | 2034 | ||
2027 | backlog_dev->weight = weight_p; | 2035 | napi->weight = weight_p; |
2028 | for (;;) { | 2036 | do { |
2029 | struct sk_buff *skb; | 2037 | struct sk_buff *skb; |
2030 | struct net_device *dev; | 2038 | struct net_device *dev; |
2031 | 2039 | ||
2032 | local_irq_disable(); | 2040 | local_irq_disable(); |
2033 | skb = __skb_dequeue(&queue->input_pkt_queue); | 2041 | skb = __skb_dequeue(&queue->input_pkt_queue); |
2034 | if (!skb) | 2042 | if (!skb) { |
2035 | goto job_done; | 2043 | __napi_complete(napi); |
2044 | local_irq_enable(); | ||
2045 | break; | ||
2046 | } | ||
2047 | |||
2036 | local_irq_enable(); | 2048 | local_irq_enable(); |
2037 | 2049 | ||
2038 | dev = skb->dev; | 2050 | dev = skb->dev; |
@@ -2040,67 +2052,86 @@ static int process_backlog(struct net_device *backlog_dev, int *budget) | |||
2040 | netif_receive_skb(skb); | 2052 | netif_receive_skb(skb); |
2041 | 2053 | ||
2042 | dev_put(dev); | 2054 | dev_put(dev); |
2055 | } while (++work < quota && jiffies == start_time); | ||
2043 | 2056 | ||
2044 | work++; | 2057 | return work; |
2045 | 2058 | } | |
2046 | if (work >= quota || jiffies - start_time > 1) | ||
2047 | break; | ||
2048 | |||
2049 | } | ||
2050 | |||
2051 | backlog_dev->quota -= work; | ||
2052 | *budget -= work; | ||
2053 | return -1; | ||
2054 | |||
2055 | job_done: | ||
2056 | backlog_dev->quota -= work; | ||
2057 | *budget -= work; | ||
2058 | 2059 | ||
2059 | list_del(&backlog_dev->poll_list); | 2060 | /** |
2060 | smp_mb__before_clear_bit(); | 2061 | * __napi_schedule - schedule for receive |
2061 | netif_poll_enable(backlog_dev); | 2062 | * @n: entry to schedule |
2063 | * | ||
2064 | * The entry's receive function will be scheduled to run | ||
2065 | */ | ||
2066 | void fastcall __napi_schedule(struct napi_struct *n) | ||
2067 | { | ||
2068 | unsigned long flags; | ||
2062 | 2069 | ||
2063 | local_irq_enable(); | 2070 | local_irq_save(flags); |
2064 | return 0; | 2071 | list_add_tail(&n->poll_list, &__get_cpu_var(softnet_data).poll_list); |
2072 | __raise_softirq_irqoff(NET_RX_SOFTIRQ); | ||
2073 | local_irq_restore(flags); | ||
2065 | } | 2074 | } |
2075 | EXPORT_SYMBOL(__napi_schedule); | ||
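
For driver authors, the conversion this enables can be summarized as follows
(netif_napi_add() is introduced elsewhere in this patch; the weight of 64 is
just a typical value):

/* Old model: polling state lived on the net_device itself:
 *	dev->poll = my_poll;	int my_poll(struct net_device *, int *budget);
 *	dev->weight = 64;
 * New model: state lives in a driver-owned napi_struct:
 *	int my_poll(struct napi_struct *napi, int budget);
 */
netif_napi_add(dev, &priv->napi, my_poll, 64);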
2076 | |||
2066 | 2077 | ||
2067 | static void net_rx_action(struct softirq_action *h) | 2078 | static void net_rx_action(struct softirq_action *h) |
2068 | { | 2079 | { |
2069 | struct softnet_data *queue = &__get_cpu_var(softnet_data); | 2080 | struct list_head *list = &__get_cpu_var(softnet_data).poll_list; |
2070 | unsigned long start_time = jiffies; | 2081 | unsigned long start_time = jiffies; |
2071 | int budget = netdev_budget; | 2082 | int budget = netdev_budget; |
2072 | void *have; | 2083 | void *have; |
2073 | 2084 | ||
2074 | local_irq_disable(); | 2085 | local_irq_disable(); |
2075 | 2086 | ||
2076 | while (!list_empty(&queue->poll_list)) { | 2087 | while (!list_empty(list)) { |
2077 | struct net_device *dev; | 2088 | struct napi_struct *n; |
2089 | int work, weight; | ||
2078 | 2090 | ||
2079 | if (budget <= 0 || jiffies - start_time > 1) | 2091 | /* If softirq window is exhausted then punt. |
2092 | * | ||
2093 | * Note that this is a slight policy change from the | ||
2094 | * previous NAPI code, which would allow up to 2 | ||
2095 | * jiffies to pass before breaking out. The test | ||
2096 | * used to be "jiffies - start_time > 1". | ||
2097 | */ | ||
2098 | if (unlikely(budget <= 0 || jiffies != start_time)) | ||
2080 | goto softnet_break; | 2099 | goto softnet_break; |
2081 | 2100 | ||
2082 | local_irq_enable(); | 2101 | local_irq_enable(); |
2083 | 2102 | ||
2084 | dev = list_entry(queue->poll_list.next, | 2103 | /* Even though interrupts have been re-enabled, this |
2085 | struct net_device, poll_list); | 2104 | * access is safe because interrupts can only add new |
2086 | have = netpoll_poll_lock(dev); | 2105 | * entries to the tail of this list, and only ->poll() |
2106 | * calls can remove this head entry from the list. | ||
2107 | */ | ||
2108 | n = list_entry(list->next, struct napi_struct, poll_list); | ||
2087 | 2109 | ||
2088 | if (dev->quota <= 0 || dev->poll(dev, &budget)) { | 2110 | have = netpoll_poll_lock(n); |
2089 | netpoll_poll_unlock(have); | 2111 | |
2090 | local_irq_disable(); | 2112 | weight = n->weight; |
2091 | list_move_tail(&dev->poll_list, &queue->poll_list); | 2113 | |
2092 | if (dev->quota < 0) | 2114 | work = n->poll(n, weight); |
2093 | dev->quota += dev->weight; | 2115 | |
2094 | else | 2116 | WARN_ON_ONCE(work > weight); |
2095 | dev->quota = dev->weight; | 2117 | |
2096 | } else { | 2118 | budget -= work; |
2097 | netpoll_poll_unlock(have); | 2119 | |
2098 | dev_put(dev); | 2120 | local_irq_disable(); |
2099 | local_irq_disable(); | 2121 | |
2100 | } | 2122 | /* Drivers must not modify the NAPI state if they |
2123 | * consume the entire weight. In such cases this code | ||
2124 | * still "owns" the NAPI instance and therefore can | ||
2125 | * move the instance around on the list at-will. | ||
2126 | */ | ||
2127 | if (unlikely(work == weight)) | ||
2128 | list_move_tail(&n->poll_list, list); | ||
2129 | |||
2130 | netpoll_poll_unlock(have); | ||
2101 | } | 2131 | } |
2102 | out: | 2132 | out: |
2103 | local_irq_enable(); | 2133 | local_irq_enable(); |
2134 | |||
2104 | #ifdef CONFIG_NET_DMA | 2135 | #ifdef CONFIG_NET_DMA |
2105 | /* | 2136 | /* |
2106 | * There may not be any more sk_buffs coming right now, so push | 2137 | * There may not be any more sk_buffs coming right now, so push |
@@ -2115,6 +2146,7 @@ out: | |||
2115 | } | 2146 | } |
2116 | } | 2147 | } |
2117 | #endif | 2148 | #endif |
2149 | |||
2118 | return; | 2150 | return; |
2119 | 2151 | ||
2120 | softnet_break: | 2152 | softnet_break: |
@@ -3704,6 +3736,7 @@ struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name, | |||
3704 | dev->egress_subqueue_count = queue_count; | 3736 | dev->egress_subqueue_count = queue_count; |
3705 | 3737 | ||
3706 | dev->get_stats = internal_stats; | 3738 | dev->get_stats = internal_stats; |
3739 | netpoll_netdev_init(dev); | ||
3707 | setup(dev); | 3740 | setup(dev); |
3708 | strcpy(dev->name, name); | 3741 | strcpy(dev->name, name); |
3709 | return dev; | 3742 | return dev; |
@@ -4076,10 +4109,9 @@ static int __init net_dev_init(void) | |||
4076 | skb_queue_head_init(&queue->input_pkt_queue); | 4109 | skb_queue_head_init(&queue->input_pkt_queue); |
4077 | queue->completion_queue = NULL; | 4110 | queue->completion_queue = NULL; |
4078 | INIT_LIST_HEAD(&queue->poll_list); | 4111 | INIT_LIST_HEAD(&queue->poll_list); |
4079 | set_bit(__LINK_STATE_START, &queue->backlog_dev.state); | 4112 | |
4080 | queue->backlog_dev.weight = weight_p; | 4113 | queue->backlog.poll = process_backlog; |
4081 | queue->backlog_dev.poll = process_backlog; | 4114 | queue->backlog.weight = weight_p; |
4082 | atomic_set(&queue->backlog_dev.refcnt, 1); | ||
4083 | } | 4115 | } |
4084 | 4116 | ||
4085 | netdev_dma_register(); | 4117 | netdev_dma_register(); |
diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c index 5c19b0646d7a..79159db6acb9 100644 --- a/net/core/net-sysfs.c +++ b/net/core/net-sysfs.c | |||
@@ -216,20 +216,6 @@ static ssize_t store_tx_queue_len(struct device *dev, | |||
216 | return netdev_store(dev, attr, buf, len, change_tx_queue_len); | 216 | return netdev_store(dev, attr, buf, len, change_tx_queue_len); |
217 | } | 217 | } |
218 | 218 | ||
219 | NETDEVICE_SHOW(weight, fmt_dec); | ||
220 | |||
221 | static int change_weight(struct net_device *net, unsigned long new_weight) | ||
222 | { | ||
223 | net->weight = new_weight; | ||
224 | return 0; | ||
225 | } | ||
226 | |||
227 | static ssize_t store_weight(struct device *dev, struct device_attribute *attr, | ||
228 | const char *buf, size_t len) | ||
229 | { | ||
230 | return netdev_store(dev, attr, buf, len, change_weight); | ||
231 | } | ||
232 | |||
233 | static struct device_attribute net_class_attributes[] = { | 219 | static struct device_attribute net_class_attributes[] = { |
234 | __ATTR(addr_len, S_IRUGO, show_addr_len, NULL), | 220 | __ATTR(addr_len, S_IRUGO, show_addr_len, NULL), |
235 | __ATTR(iflink, S_IRUGO, show_iflink, NULL), | 221 | __ATTR(iflink, S_IRUGO, show_iflink, NULL), |
@@ -246,7 +232,6 @@ static struct device_attribute net_class_attributes[] = { | |||
246 | __ATTR(flags, S_IRUGO | S_IWUSR, show_flags, store_flags), | 232 | __ATTR(flags, S_IRUGO | S_IWUSR, show_flags, store_flags), |
247 | __ATTR(tx_queue_len, S_IRUGO | S_IWUSR, show_tx_queue_len, | 233 | __ATTR(tx_queue_len, S_IRUGO | S_IWUSR, show_tx_queue_len, |
248 | store_tx_queue_len), | 234 | store_tx_queue_len), |
249 | __ATTR(weight, S_IRUGO | S_IWUSR, show_weight, store_weight), | ||
250 | {} | 235 | {} |
251 | }; | 236 | }; |
252 | 237 | ||
diff --git a/net/core/netpoll.c b/net/core/netpoll.c index de1b26aa5720..abe6e3a4cc44 100644 --- a/net/core/netpoll.c +++ b/net/core/netpoll.c | |||
@@ -119,19 +119,22 @@ static __sum16 checksum_udp(struct sk_buff *skb, struct udphdr *uh, | |||
119 | static void poll_napi(struct netpoll *np) | 119 | static void poll_napi(struct netpoll *np) |
120 | { | 120 | { |
121 | struct netpoll_info *npinfo = np->dev->npinfo; | 121 | struct netpoll_info *npinfo = np->dev->npinfo; |
122 | struct napi_struct *napi; | ||
122 | int budget = 16; | 123 | int budget = 16; |
123 | 124 | ||
124 | if (test_bit(__LINK_STATE_RX_SCHED, &np->dev->state) && | 125 | list_for_each_entry(napi, &np->dev->napi_list, dev_list) { |
125 | npinfo->poll_owner != smp_processor_id() && | 126 | if (test_bit(NAPI_STATE_SCHED, &napi->state) && |
126 | spin_trylock(&npinfo->poll_lock)) { | 127 | napi->poll_owner != smp_processor_id() && |
127 | npinfo->rx_flags |= NETPOLL_RX_DROP; | 128 | spin_trylock(&napi->poll_lock)) { |
128 | atomic_inc(&trapped); | 129 | npinfo->rx_flags |= NETPOLL_RX_DROP; |
130 | atomic_inc(&trapped); | ||
129 | 131 | ||
130 | np->dev->poll(np->dev, &budget); | 132 | napi->poll(napi, budget); |
131 | 133 | ||
132 | atomic_dec(&trapped); | 134 | atomic_dec(&trapped); |
133 | npinfo->rx_flags &= ~NETPOLL_RX_DROP; | 135 | npinfo->rx_flags &= ~NETPOLL_RX_DROP; |
134 | spin_unlock(&npinfo->poll_lock); | 136 | spin_unlock(&napi->poll_lock); |
137 | } | ||
135 | } | 138 | } |
136 | } | 139 | } |
137 | 140 | ||
@@ -157,7 +160,7 @@ void netpoll_poll(struct netpoll *np) | |||
157 | 160 | ||
158 | /* Process pending work on NIC */ | 161 | /* Process pending work on NIC */ |
159 | np->dev->poll_controller(np->dev); | 162 | np->dev->poll_controller(np->dev); |
160 | if (np->dev->poll) | 163 | if (!list_empty(&np->dev->napi_list)) |
161 | poll_napi(np); | 164 | poll_napi(np); |
162 | 165 | ||
163 | service_arp_queue(np->dev->npinfo); | 166 | service_arp_queue(np->dev->npinfo); |
@@ -233,6 +236,17 @@ repeat: | |||
233 | return skb; | 236 | return skb; |
234 | } | 237 | } |
235 | 238 | ||
239 | static int netpoll_owner_active(struct net_device *dev) | ||
240 | { | ||
241 | struct napi_struct *napi; | ||
242 | |||
243 | list_for_each_entry(napi, &dev->napi_list, dev_list) { | ||
244 | if (napi->poll_owner == smp_processor_id()) | ||
245 | return 1; | ||
246 | } | ||
247 | return 0; | ||
248 | } | ||
249 | |||
236 | static void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb) | 250 | static void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb) |
237 | { | 251 | { |
238 | int status = NETDEV_TX_BUSY; | 252 | int status = NETDEV_TX_BUSY; |
@@ -246,8 +260,7 @@ static void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb) | |||
246 | } | 260 | } |
247 | 261 | ||
248 | /* don't get messages out of order, and no recursion */ | 262 | /* don't get messages out of order, and no recursion */ |
249 | if (skb_queue_len(&npinfo->txq) == 0 && | 263 | if (skb_queue_len(&npinfo->txq) == 0 && !netpoll_owner_active(dev)) { |
250 | npinfo->poll_owner != smp_processor_id()) { | ||
251 | unsigned long flags; | 264 | unsigned long flags; |
252 | 265 | ||
253 | local_irq_save(flags); | 266 | local_irq_save(flags); |
@@ -652,8 +665,6 @@ int netpoll_setup(struct netpoll *np) | |||
652 | 665 | ||
653 | npinfo->rx_flags = 0; | 666 | npinfo->rx_flags = 0; |
654 | npinfo->rx_np = NULL; | 667 | npinfo->rx_np = NULL; |
655 | spin_lock_init(&npinfo->poll_lock); | ||
656 | npinfo->poll_owner = -1; | ||
657 | 668 | ||
658 | spin_lock_init(&npinfo->rx_lock); | 669 | spin_lock_init(&npinfo->rx_lock); |
659 | skb_queue_head_init(&npinfo->arp_tx); | 670 | skb_queue_head_init(&npinfo->arp_tx); |
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index 4756d5857abf..2b0b6fac6cef 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c | |||
@@ -634,7 +634,6 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev, | |||
634 | 634 | ||
635 | NLA_PUT_STRING(skb, IFLA_IFNAME, dev->name); | 635 | NLA_PUT_STRING(skb, IFLA_IFNAME, dev->name); |
636 | NLA_PUT_U32(skb, IFLA_TXQLEN, dev->tx_queue_len); | 636 | NLA_PUT_U32(skb, IFLA_TXQLEN, dev->tx_queue_len); |
637 | NLA_PUT_U32(skb, IFLA_WEIGHT, dev->weight); | ||
638 | NLA_PUT_U8(skb, IFLA_OPERSTATE, | 637 | NLA_PUT_U8(skb, IFLA_OPERSTATE, |
639 | netif_running(dev) ? dev->operstate : IF_OPER_DOWN); | 638 | netif_running(dev) ? dev->operstate : IF_OPER_DOWN); |
640 | NLA_PUT_U8(skb, IFLA_LINKMODE, dev->link_mode); | 639 | NLA_PUT_U8(skb, IFLA_LINKMODE, dev->link_mode); |
@@ -834,9 +833,6 @@ static int do_setlink(struct net_device *dev, struct ifinfomsg *ifm, | |||
834 | if (tb[IFLA_TXQLEN]) | 833 | if (tb[IFLA_TXQLEN]) |
835 | dev->tx_queue_len = nla_get_u32(tb[IFLA_TXQLEN]); | 834 | dev->tx_queue_len = nla_get_u32(tb[IFLA_TXQLEN]); |
836 | 835 | ||
837 | if (tb[IFLA_WEIGHT]) | ||
838 | dev->weight = nla_get_u32(tb[IFLA_WEIGHT]); | ||
839 | |||
840 | if (tb[IFLA_OPERSTATE]) | 836 | if (tb[IFLA_OPERSTATE]) |
841 | set_operstate(dev, nla_get_u8(tb[IFLA_OPERSTATE])); | 837 | set_operstate(dev, nla_get_u8(tb[IFLA_OPERSTATE])); |
842 | 838 | ||
@@ -1074,8 +1070,6 @@ replay: | |||
1074 | nla_len(tb[IFLA_BROADCAST])); | 1070 | nla_len(tb[IFLA_BROADCAST])); |
1075 | if (tb[IFLA_TXQLEN]) | 1071 | if (tb[IFLA_TXQLEN]) |
1076 | dev->tx_queue_len = nla_get_u32(tb[IFLA_TXQLEN]); | 1072 | dev->tx_queue_len = nla_get_u32(tb[IFLA_TXQLEN]); |
1077 | if (tb[IFLA_WEIGHT]) | ||
1078 | dev->weight = nla_get_u32(tb[IFLA_WEIGHT]); | ||
1079 | if (tb[IFLA_OPERSTATE]) | 1073 | if (tb[IFLA_OPERSTATE]) |
1080 | set_operstate(dev, nla_get_u8(tb[IFLA_OPERSTATE])); | 1074 | set_operstate(dev, nla_get_u8(tb[IFLA_OPERSTATE])); |
1081 | if (tb[IFLA_LINKMODE]) | 1075 | if (tb[IFLA_LINKMODE]) |
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c index c81649cf0b9e..e970e8e75720 100644 --- a/net/sched/sch_generic.c +++ b/net/sched/sch_generic.c | |||
@@ -256,6 +256,12 @@ static void dev_watchdog_down(struct net_device *dev) | |||
256 | netif_tx_unlock_bh(dev); | 256 | netif_tx_unlock_bh(dev); |
257 | } | 257 | } |
258 | 258 | ||
259 | /** | ||
260 | * netif_carrier_on - set carrier | ||
261 | * @dev: network device | ||
262 | * | ||
263 | * Device has detected acquisition of carrier. | ||
264 | */ | ||
259 | void netif_carrier_on(struct net_device *dev) | 265 | void netif_carrier_on(struct net_device *dev) |
260 | { | 266 | { |
261 | if (test_and_clear_bit(__LINK_STATE_NOCARRIER, &dev->state)) | 267 | if (test_and_clear_bit(__LINK_STATE_NOCARRIER, &dev->state)) |
@@ -264,6 +270,12 @@ void netif_carrier_on(struct net_device *dev) | |||
264 | __netdev_watchdog_up(dev); | 270 | __netdev_watchdog_up(dev); |
265 | } | 271 | } |
266 | 272 | ||
273 | /** | ||
274 | * netif_carrier_off - clear carrier | ||
275 | * @dev: network device | ||
276 | * | ||
277 | * Device has detected loss of carrier. | ||
278 | */ | ||
267 | void netif_carrier_off(struct net_device *dev) | 279 | void netif_carrier_off(struct net_device *dev) |
268 | { | 280 | { |
269 | if (!test_and_set_bit(__LINK_STATE_NOCARRIER, &dev->state)) | 281 | if (!test_and_set_bit(__LINK_STATE_NOCARRIER, &dev->state)) |