author		David S. Miller <davem@davemloft.net>	2008-01-07 23:35:07 -0500
committer	David S. Miller <davem@davemloft.net>	2008-01-09 02:30:07 -0500
commit		a0a46196cd98af5cc015842bba757571f02a8c30
tree		1e73ef1d0251f969fbb1a51637722c2c793ff0b6 /include/linux/netdevice.h
parent		bdb95b1792664f25eb2a4d13a587d2020aa93002
[NET]: Add NAPI_STATE_DISABLE.
Create a bit to signal that a napi_disable() is in progress.
This sets up infrastructure such that net_rx_action() can generically
break out of the ->poll() loop on a NAPI context that has a pending
napi_disable() yet is being bombed with packets (and thus would
otherwise poll endlessly and not allow the napi_disable() to finish).
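(The net/core/dev.c side of this is outside the diffstat below; as a minimal sketch, assuming the variable names net_rx_action() already uses in its per-context loop, the check could look like this:)

	/* ->poll() consumed its full budget: normally reschedule the
	 * context, but if a disable is pending, complete it instead so
	 * napi_disable() can acquire NAPI_STATE_SCHED and finish.
	 */
	if (unlikely(work == weight)) {
		if (unlikely(napi_disable_pending(n)))
			__napi_complete(n);
		else
			list_move_tail(&n->poll_list, list);
	}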
napi_disable() now first sets the NAPI_STATE_DISABLE bit (to indicate
that a disable is pending), then polls for the NAPI_STATE_SCHED bit;
once the NAPI_STATE_SCHED bit is acquired, the NAPI_STATE_DISABLE bit
is cleared. Here, the test_and_set_bit() provides the necessary
memory barrier between the various bitops.
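Spelled out with comments, the napi_disable() added in the hunk below does:

	static inline void napi_disable(struct napi_struct *n)
	{
		/* 1. Publish the pending disable first, so any new
		 *    napi_schedule_prep() caller backs off.
		 */
		set_bit(NAPI_STATE_DISABLE, &n->state);

		/* 2. Wait until we can take NAPI_STATE_SCHED ourselves,
		 *    i.e. until the current poll owner (if any) drops it;
		 *    the test_and_set_bit() also orders this against step 1.
		 */
		while (test_and_set_bit(NAPI_STATE_SCHED, &n->state))
			msleep(1);

		/* 3. We now hold NAPI_STATE_SCHED, so no poll can run;
		 *    the disable marker has served its purpose.
		 */
		clear_bit(NAPI_STATE_DISABLE, &n->state);
	}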
napi_schedule_prep() now tests for a pending disable as its first
action and won't try to obtain the NAPI_STATE_SCHED bit if a disable
is pending.
As a result, we can remove the netif_running() check in
netif_rx_schedule_prep(), because the NAPI disable-pending state serves
this purpose. And it does so in a NAPI-centric manner, which is what
we really want.
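(For context, a sketch of the driver RX interrupt path that goes through
this pair; the mydrv_* names and the IRQ-masking helper are hypothetical:)

	#include <linux/interrupt.h>
	#include <linux/netdevice.h>

	static irqreturn_t mydrv_interrupt(int irq, void *dev_id)
	{
		struct net_device *dev = dev_id;
		struct mydrv_priv *priv = netdev_priv(dev);

		/* After this patch, only a pending napi_disable() (checked
		 * inside napi_schedule_prep()) can refuse the schedule here,
		 * not netif_running().
		 */
		if (netif_rx_schedule_prep(dev, &priv->napi)) {
			mydrv_mask_rx_irq(priv);	/* hypothetical helper */
			__netif_rx_schedule(dev, &priv->napi);
		}
		return IRQ_HANDLED;
	}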
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'include/linux/netdevice.h')
-rw-r--r--	include/linux/netdevice.h	16
1 file changed, 13 insertions(+), 3 deletions(-)
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index e393995d283..b0813c3286b 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -319,21 +319,29 @@ struct napi_struct {
 enum
 {
 	NAPI_STATE_SCHED,	/* Poll is scheduled */
+	NAPI_STATE_DISABLE,	/* Disable pending */
 };
 
 extern void FASTCALL(__napi_schedule(struct napi_struct *n));
 
+static inline int napi_disable_pending(struct napi_struct *n)
+{
+	return test_bit(NAPI_STATE_DISABLE, &n->state);
+}
+
 /**
  *	napi_schedule_prep - check if napi can be scheduled
  *	@n: napi context
  *
  * Test if NAPI routine is already running, and if not mark
  * it as running.  This is used as a condition variable
- * insure only one NAPI poll instance runs
+ * insure only one NAPI poll instance runs.  We also make
+ * sure there is no pending NAPI disable.
  */
 static inline int napi_schedule_prep(struct napi_struct *n)
 {
-	return !test_and_set_bit(NAPI_STATE_SCHED, &n->state);
+	return !napi_disable_pending(n) &&
+		!test_and_set_bit(NAPI_STATE_SCHED, &n->state);
 }
 
 /**
@@ -389,8 +397,10 @@ static inline void napi_complete(struct napi_struct *n)
  */
 static inline void napi_disable(struct napi_struct *n)
 {
+	set_bit(NAPI_STATE_DISABLE, &n->state);
 	while (test_and_set_bit(NAPI_STATE_SCHED, &n->state))
 		msleep(1);
+	clear_bit(NAPI_STATE_DISABLE, &n->state);
 }
 
 /**
@@ -1268,7 +1278,7 @@ static inline u32 netif_msg_init(int debug_value, int default_msg_enable_bits)
 static inline int netif_rx_schedule_prep(struct net_device *dev,
 					 struct napi_struct *napi)
 {
-	return netif_running(dev) && napi_schedule_prep(napi);
+	return napi_schedule_prep(napi);
 }
 
 /* Add interface to tail of rx poll list.  This assumes that _prep has