From 485832a5d928facd82f1525270d9f048da2063a1 Mon Sep 17 00:00:00 2001
From: Andi Kleen
Date: Sat, 5 Nov 2005 17:25:54 +0100
Subject: [PATCH] x86_64: Use int operations in spinlocks to support more than
 128 CPUs spinning.

Pointed out by Eric Dumazet

Signed-off-by: Andi Kleen
Signed-off-by: Linus Torvalds
---
 include/asm-x86_64/spinlock.h | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)

diff --git a/include/asm-x86_64/spinlock.h b/include/asm-x86_64/spinlock.h
index 69636831ad2f..fe484a699cc3 100644
--- a/include/asm-x86_64/spinlock.h
+++ b/include/asm-x86_64/spinlock.h
@@ -18,22 +18,22 @@
  */
 
 #define __raw_spin_is_locked(x) \
-		(*(volatile signed char *)(&(x)->slock) <= 0)
+		(*(volatile signed int *)(&(x)->slock) <= 0)
 
 #define __raw_spin_lock_string \
 	"\n1:\t" \
-	"lock ; decb %0\n\t" \
+	"lock ; decl %0\n\t" \
 	"js 2f\n" \
 	LOCK_SECTION_START("") \
 	"2:\t" \
 	"rep;nop\n\t" \
-	"cmpb $0,%0\n\t" \
+	"cmpl $0,%0\n\t" \
 	"jle 2b\n\t" \
 	"jmp 1b\n" \
 	LOCK_SECTION_END
 
 #define __raw_spin_unlock_string \
-	"movb $1,%0" \
+	"movl $1,%0" \
 		:"=m" (lock->slock) : : "memory"
 
 static inline void __raw_spin_lock(raw_spinlock_t *lock)
@@ -47,10 +47,10 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock)
 
 static inline int __raw_spin_trylock(raw_spinlock_t *lock)
 {
-	char oldval;
+	int oldval;
 	__asm__ __volatile__(
-		"xchgb %b0,%1"
+		"xchgl %0,%1"
 		:"=q" (oldval), "=m" (lock->slock)
 		:"0" (0) : "memory");
 	return oldval > 0;
 }
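
Background on the fix: slock starts at 1 (unlocked), and every CPU that enters
__raw_spin_lock executes one "lock ; decb" before it starts spinning, so with
the holder plus N waiters the byte holds 1 - (N + 1). A signed char bottoms out
at -128; one more waiter wraps the byte to +127, and every spinner's
"cmpb $0,%0 ; jle 2b" test then sees the lock as free. The sketch below is
illustration only, not part of the patch or the kernel: a userspace model of
the 8-bit wraparound (the helper as_signed_byte and the loop variable decs are
invented here; slock, decb, and cmpb come from the code above).

	#include <stdio.h>

	/* Two's-complement value of an 8-bit byte, computed portably. */
	static int as_signed_byte(unsigned char b)
	{
		return b < 128 ? b : (int)b - 256;
	}

	int main(void)
	{
		/* Old byte-wide lock word: 1 = unlocked, <= 0 = held or
		 * contended. Each contending CPU runs exactly one
		 * "lock ; decb" before it starts spinning, so after D total
		 * decrements the byte holds 1 - D, wrapped modulo 256 just
		 * as the hardware decrement wraps it.
		 */
		unsigned char slock = 1;

		for (int decs = 1; decs <= 130; decs++) {
			slock--;	/* models "lock ; decb %0" */
			if (as_signed_byte(slock) > 0)
				printf("after %d decrements (%d CPUs spinning): "
				       "slock reads %d -> looks unlocked!\n",
				       decs, decs - 1, as_signed_byte(slock));
		}
		/* Decrement 130 wraps -128 to +127: with 129 CPUs spinning,
		 * the "cmpb $0,%0 ; jle 2b" wait loop falsely concludes the
		 * lock is free, so the byte-wide lock word cannot tolerate
		 * more than 128 spinners.
		 */
		return 0;
	}

Widening the lock word to 32-bit accesses (the decl/cmpl/movl/xchgl in the
patch) moves the same wrap point out to 2^31 decrements, far beyond any
realistic CPU count.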