author     Samuel Thibault <samuel.thibault@labri.fr>    2005-05-21 11:50:15 -0400
committer  Linus Torvalds <torvalds@ppc970.osdl.org>     2005-05-21 13:46:48 -0400
commit     10f02d1c59e55f529140dda3a92f0099d748451c
tree       6b5a5804503401624171aff65b09ff022a9f0103 /include/linux
parent     9636273dae265b9354b861b373cd43cd76a6d0fe
[PATCH] spin_unlock_bh() and preempt_check_resched()
In _spin_unlock_bh(lock):
do { \
	_raw_spin_unlock(lock); \
	preempt_enable(); \
	local_bh_enable(); \
	__release(lock); \
} while (0)
there is no reason to use preempt_enable() rather than a simple
preempt_enable_no_resched(): since bottom halves are still disabled at this
point, preempt_schedule() will always return at once (preempt_count != 0),
and hence the preempt_check_resched() done by preempt_enable() is useless
here.
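For reference, the helpers involved looked roughly like this in the 2.6-era
include/linux/preempt.h (a simplified sketch from memory, not verbatim kernel
source); preempt_enable() is preempt_enable_no_resched() plus the
preempt_check_resched() that is pointless while bottom halves are disabled:

/* Simplified sketch of the 2.6-era preempt helpers. */
#define preempt_enable_no_resched() \
do { \
	barrier(); \
	dec_preempt_count(); \
} while (0)

#define preempt_check_resched() \
do { \
	if (unlikely(test_thread_flag(TIF_NEED_RESCHED))) \
		preempt_schedule(); \
} while (0)

#define preempt_enable() \
do { \
	preempt_enable_no_resched(); \
	barrier(); \
	preempt_check_resched(); \
} while (0)

Because local_bh_disable() has already raised preempt_count, preempt_schedule()
bails out on its preempt_count check and the extra call buys nothing; any
pending reschedule is still picked up when bottom halves are re-enabled.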
This patch therefore uses preempt_enable_no_resched() instead of
preempt_enable(), avoiding the useless preempt_check_resched() just before
bottom halves are re-enabled.
Signed-off-by: Samuel Thibault <samuel.thibault@ens-lyon.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'include/linux')
-rw-r--r--   include/linux/spinlock.h   8
1 file changed, 4 insertions, 4 deletions
diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h
index e895f3eaf53a..d6ba068719b6 100644
--- a/include/linux/spinlock.h
+++ b/include/linux/spinlock.h
@@ -248,7 +248,7 @@ typedef struct {
 
 #define _spin_trylock_bh(lock)	({preempt_disable(); local_bh_disable(); \
 				_raw_spin_trylock(lock) ? \
-				1 : ({preempt_enable(); local_bh_enable(); 0;});})
+				1 : ({preempt_enable_no_resched(); local_bh_enable(); 0;});})
 
 #define _spin_lock(lock)	\
 do { \
@@ -383,7 +383,7 @@ do { \
 #define _spin_unlock_bh(lock) \
 do { \
 	_raw_spin_unlock(lock); \
-	preempt_enable(); \
+	preempt_enable_no_resched(); \
 	local_bh_enable(); \
 	__release(lock); \
 } while (0)
@@ -391,7 +391,7 @@ do { \
 #define _write_unlock_bh(lock) \
 do { \
 	_raw_write_unlock(lock); \
-	preempt_enable(); \
+	preempt_enable_no_resched(); \
 	local_bh_enable(); \
 	__release(lock); \
 } while (0)
@@ -423,8 +423,8 @@ do { \
 #define _read_unlock_bh(lock) \
 do { \
 	_raw_read_unlock(lock); \
+	preempt_enable_no_resched(); \
 	local_bh_enable(); \
-	preempt_enable(); \
 	__release(lock); \
 } while (0)
 
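For callers nothing changes; the _bh lock variants are used as before. A
minimal caller-side sketch (the lock and counter here are hypothetical,
purely for illustration; on preemptible kernels spin_unlock_bh() resolves to
the _spin_unlock_bh() path modified above):

#include <linux/spinlock.h>

/* Hypothetical data shared between process and softirq context. */
static spinlock_t stats_lock = SPIN_LOCK_UNLOCKED;
static unsigned long packets_seen;

/*
 * Process-context path: disable bottom halves while holding the lock so a
 * softirq on the same CPU cannot deadlock against us.
 */
static void account_packet(void)
{
	spin_lock_bh(&stats_lock);
	packets_seen++;
	spin_unlock_bh(&stats_lock);	/* unlock path touched by this patch */
}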