author    Frederic Weisbecker <fweisbec@gmail.com>  2009-01-25 15:50:13 -0500
committer Ingo Molnar <mingo@elte.hu>               2009-01-26 08:06:36 -0500
commit    2d4d57db692ea790e185656516e6ebe8791f1788
tree      f7ad21dbcb4d5de2f0eb63024c1b5f1bf5321d76 /arch/x86/include/asm
parent    7106a5ab89c50c6b5aadea0850b40323804a922d
x86: micro-optimize __raw_read_trylock()
The current version of __raw_read_trylock() starts by decrementing the lock and then reads its new value as a separate operation. That makes three pointer dereferences (read, write after the sub, read), whereas a single atomic_dec_return() does only two (read, write).

Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'arch/x86/include/asm')
-rw-r--r--  arch/x86/include/asm/spinlock.h  3
1 file changed, 1 insertion(+), 2 deletions(-)
diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h
index d17c91981da2..4d3dcc51cacd 100644
--- a/arch/x86/include/asm/spinlock.h
+++ b/arch/x86/include/asm/spinlock.h
@@ -329,8 +329,7 @@ static inline int __raw_read_trylock(raw_rwlock_t *lock)
 {
 	atomic_t *count = (atomic_t *)lock;
 
-	atomic_dec(count);
-	if (atomic_read(count) >= 0)
+	if (atomic_dec_return(count) >= 0)
 		return 1;
 	atomic_inc(count);
 	return 0;
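
For reference, this is a sketch of how __raw_read_trylock() reads once the hunk above is applied, with the dereference accounting from the commit message annotated as comments. It assumes, as the cast in the hunk implies, that the rwlock's lock word can be treated as an atomic_t counter where a non-negative value after the decrement means the read lock was taken.

static inline int __raw_read_trylock(raw_rwlock_t *lock)
{
	atomic_t *count = (atomic_t *)lock;

	/*
	 * atomic_dec_return() decrements the counter and hands back the
	 * new value in one operation: one read and one write of *count.
	 * The old atomic_dec() + atomic_read() pair needed an extra read.
	 */
	if (atomic_dec_return(count) >= 0)
		return 1;

	/* A writer holds the lock: undo the decrement and report failure. */
	atomic_inc(count);
	return 0;
}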