diff options
| author | Linus Torvalds <torvalds@woody.linux-foundation.org> | 2008-01-09 11:05:12 -0500 |
|---|---|---|
| committer | Linus Torvalds <torvalds@woody.linux-foundation.org> | 2008-01-09 11:05:12 -0500 |
| commit | fd0b45dfd1858c6b49d06355a460bcf36d654c06 (patch) | |
| tree | 85386a76355b398b7f452b737af4ab48f116677a /include/linux | |
| parent | fcfd50afb6e94c8cf121ca4e7e3e7166bae7c6aa (diff) | |
| parent | 1c9b7aa1eb40ab708ef3242f74b9a61487623168 (diff) | |
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6
* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6: (36 commits)
[ATM]: Check IP header validity in mpc_send_packet
[IPV6]: IPV6_MULTICAST_IF setting is ignored on link-local connect()
[CONNECTOR]: Don't touch queue dev after decrement of ref count.
[SOCK]: Adds a rcu_dereference() in sk_filter
[XFRM]: xfrm_algo_clone() allocates too much memory
[FORCEDETH]: Fix reversing the MAC address on suspend.
[NET]: mcs7830 passes msecs instead of jiffies to usb_control_msg
[LRO] Fix lro_mgr->features checks
[NET]: Clone the sk_buff 'iif' field in __skb_clone()
[IPV4] ROUTE: ip_rt_dump() is unnecessarily slow
[NET]: kaweth was forgotten in msec switchover of usb_start_wait_urb
[NET] Intel ethernet drivers: update MAINTAINERS
[NET]: Make ->poll() breakout consistent in Intel ethernet drivers.
[NET]: Stop polling when napi_disable() is pending.
[NET]: Fix drivers to handle napi_disable() disabling interrupts.
[NETXEN]: Fix ->poll() done logic.
mac80211: return an error when SIWRATE doesn't match any rate
ssb: Fix probing of PCI cores if PCI and PCIE core is available
[NET]: Do not check netif_running() and carrier state in ->poll()
[NET]: Add NAPI_STATE_DISABLE.
...
Diffstat (limited to 'include/linux')
| -rw-r--r-- | include/linux/netdevice.h | 18 |
1 files changed, 13 insertions, 5 deletions
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 1e6af4f174..b0813c3286 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h | |||
| @@ -319,21 +319,29 @@ struct napi_struct { | |||
| 319 | enum | 319 | enum |
| 320 | { | 320 | { |
| 321 | NAPI_STATE_SCHED, /* Poll is scheduled */ | 321 | NAPI_STATE_SCHED, /* Poll is scheduled */ |
| 322 | NAPI_STATE_DISABLE, /* Disable pending */ | ||
| 322 | }; | 323 | }; |
| 323 | 324 | ||
| 324 | extern void FASTCALL(__napi_schedule(struct napi_struct *n)); | 325 | extern void FASTCALL(__napi_schedule(struct napi_struct *n)); |
| 325 | 326 | ||
| 327 | static inline int napi_disable_pending(struct napi_struct *n) | ||
| 328 | { | ||
| 329 | return test_bit(NAPI_STATE_DISABLE, &n->state); | ||
| 330 | } | ||
| 331 | |||
| 326 | /** | 332 | /** |
| 327 | * napi_schedule_prep - check if napi can be scheduled | 333 | * napi_schedule_prep - check if napi can be scheduled |
| 328 | * @n: napi context | 334 | * @n: napi context |
| 329 | * | 335 | * |
| 330 | * Test if NAPI routine is already running, and if not mark | 336 | * Test if NAPI routine is already running, and if not mark |
| 331 | * it as running. This is used as a condition variable | 337 | * it as running. This is used as a condition variable |
| 332 | * insure only one NAPI poll instance runs | 338 | * insure only one NAPI poll instance runs. We also make |
| 339 | * sure there is no pending NAPI disable. | ||
| 333 | */ | 340 | */ |
| 334 | static inline int napi_schedule_prep(struct napi_struct *n) | 341 | static inline int napi_schedule_prep(struct napi_struct *n) |
| 335 | { | 342 | { |
| 336 | return !test_and_set_bit(NAPI_STATE_SCHED, &n->state); | 343 | return !napi_disable_pending(n) && |
| 344 | !test_and_set_bit(NAPI_STATE_SCHED, &n->state); | ||
| 337 | } | 345 | } |
| 338 | 346 | ||
| 339 | /** | 347 | /** |
| @@ -389,8 +397,10 @@ static inline void napi_complete(struct napi_struct *n) | |||
| 389 | */ | 397 | */ |
| 390 | static inline void napi_disable(struct napi_struct *n) | 398 | static inline void napi_disable(struct napi_struct *n) |
| 391 | { | 399 | { |
| 400 | set_bit(NAPI_STATE_DISABLE, &n->state); | ||
| 392 | while (test_and_set_bit(NAPI_STATE_SCHED, &n->state)) | 401 | while (test_and_set_bit(NAPI_STATE_SCHED, &n->state)) |
| 393 | msleep(1); | 402 | msleep(1); |
| 403 | clear_bit(NAPI_STATE_DISABLE, &n->state); | ||
| 394 | } | 404 | } |
| 395 | 405 | ||
| 396 | /** | 406 | /** |
| @@ -1268,7 +1278,7 @@ static inline u32 netif_msg_init(int debug_value, int default_msg_enable_bits) | |||
| 1268 | static inline int netif_rx_schedule_prep(struct net_device *dev, | 1278 | static inline int netif_rx_schedule_prep(struct net_device *dev, |
| 1269 | struct napi_struct *napi) | 1279 | struct napi_struct *napi) |
| 1270 | { | 1280 | { |
| 1271 | return netif_running(dev) && napi_schedule_prep(napi); | 1281 | return napi_schedule_prep(napi); |
| 1272 | } | 1282 | } |
| 1273 | 1283 | ||
| 1274 | /* Add interface to tail of rx poll list. This assumes that _prep has | 1284 | /* Add interface to tail of rx poll list. This assumes that _prep has |
| @@ -1277,7 +1287,6 @@ static inline int netif_rx_schedule_prep(struct net_device *dev, | |||
| 1277 | static inline void __netif_rx_schedule(struct net_device *dev, | 1287 | static inline void __netif_rx_schedule(struct net_device *dev, |
| 1278 | struct napi_struct *napi) | 1288 | struct napi_struct *napi) |
| 1279 | { | 1289 | { |
| 1280 | dev_hold(dev); | ||
| 1281 | __napi_schedule(napi); | 1290 | __napi_schedule(napi); |
| 1282 | } | 1291 | } |
| 1283 | 1292 | ||
| @@ -1308,7 +1317,6 @@ static inline void __netif_rx_complete(struct net_device *dev, | |||
| 1308 | struct napi_struct *napi) | 1317 | struct napi_struct *napi) |
| 1309 | { | 1318 | { |
| 1310 | __napi_complete(napi); | 1319 | __napi_complete(napi); |
| 1311 | dev_put(dev); | ||
| 1312 | } | 1320 | } |
| 1313 | 1321 | ||
| 1314 | /* Remove interface from poll list: it must be in the poll list | 1322 | /* Remove interface from poll list: it must be in the poll list |
