diff options
author | Jeff Garzik <jeff@garzik.org> | 2006-09-27 18:16:47 -0400 |
---|---|---|
committer | Jeff Garzik <jeff@garzik.org> | 2006-09-27 18:16:47 -0400 |
commit | 3b9f6cb8a1ec791be79c6c7595fea922f12d1e64 (patch) | |
tree | 2393a448add846e6c2ed12f68106c3018b72c6a9 /include/asm-x86_64/spinlock.h | |
parent | c38778c3a9aeadcd1ee319cfc8ea5a9cbf8cdafa (diff) | |
parent | a77c64c1a641950626181b4857abb701d8f38ccc (diff) |
Merge branch 'master' into upstream
Diffstat (limited to 'include/asm-x86_64/spinlock.h')
-rw-r--r-- | include/asm-x86_64/spinlock.h | 80 |
1 file changed, 41 insertions, 39 deletions
diff --git a/include/asm-x86_64/spinlock.h b/include/asm-x86_64/spinlock.h index 248a79f0eaff..3daf5b005905 100644 --- a/include/asm-x86_64/spinlock.h +++ b/include/asm-x86_64/spinlock.h | |||
@@ -4,6 +4,7 @@ | |||
4 | #include <asm/atomic.h> | 4 | #include <asm/atomic.h> |
5 | #include <asm/rwlock.h> | 5 | #include <asm/rwlock.h> |
6 | #include <asm/page.h> | 6 | #include <asm/page.h> |
7 | #include <asm/processor.h> | ||
7 | 8 | ||
8 | /* | 9 | /* |
9 | * Your basic SMP spinlocks, allowing only a single CPU anywhere | 10 | * Your basic SMP spinlocks, allowing only a single CPU anywhere |
@@ -16,31 +17,23 @@ | |||
16 | * (the type definitions are in asm/spinlock_types.h) | 17 | * (the type definitions are in asm/spinlock_types.h) |
17 | */ | 18 | */ |
18 | 19 | ||
19 | #define __raw_spin_is_locked(x) \ | 20 | static inline int __raw_spin_is_locked(raw_spinlock_t *lock) |
20 | (*(volatile signed int *)(&(x)->slock) <= 0) | 21 | { |
21 | 22 | return *(volatile signed int *)(&(lock)->slock) <= 0; | |
22 | #define __raw_spin_lock_string \ | 23 | } |
23 | "\n1:\t" \ | ||
24 | LOCK_PREFIX " ; decl %0\n\t" \ | ||
25 | "js 2f\n" \ | ||
26 | LOCK_SECTION_START("") \ | ||
27 | "2:\t" \ | ||
28 | "rep;nop\n\t" \ | ||
29 | "cmpl $0,%0\n\t" \ | ||
30 | "jle 2b\n\t" \ | ||
31 | "jmp 1b\n" \ | ||
32 | LOCK_SECTION_END | ||
33 | |||
34 | #define __raw_spin_lock_string_up \ | ||
35 | "\n\tdecl %0" | ||
36 | |||
37 | #define __raw_spin_unlock_string \ | ||
38 | "movl $1,%0" \ | ||
39 | :"=m" (lock->slock) : : "memory" | ||
40 | 24 | ||
41 | static inline void __raw_spin_lock(raw_spinlock_t *lock) | 25 | static inline void __raw_spin_lock(raw_spinlock_t *lock) |
42 | { | 26 | { |
43 | asm volatile(__raw_spin_lock_string : "=m" (lock->slock) : : "memory"); | 27 | asm volatile( |
28 | "\n1:\t" | ||
29 | LOCK_PREFIX " ; decl %0\n\t" | ||
30 | "jns 2f\n" | ||
31 | "3:\n" | ||
32 | "rep;nop\n\t" | ||
33 | "cmpl $0,%0\n\t" | ||
34 | "jle 3b\n\t" | ||
35 | "jmp 1b\n" | ||
36 | "2:\t" : "=m" (lock->slock) : : "memory"); | ||
44 | } | 37 | } |
45 | 38 | ||
46 | #define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock) | 39 | #define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock) |
@@ -49,7 +42,7 @@ static inline int __raw_spin_trylock(raw_spinlock_t *lock) | |||
49 | { | 42 | { |
50 | int oldval; | 43 | int oldval; |
51 | 44 | ||
52 | __asm__ __volatile__( | 45 | asm volatile( |
53 | "xchgl %0,%1" | 46 | "xchgl %0,%1" |
54 | :"=q" (oldval), "=m" (lock->slock) | 47 | :"=q" (oldval), "=m" (lock->slock) |
55 | :"0" (0) : "memory"); | 48 | :"0" (0) : "memory"); |
@@ -59,13 +52,14 @@ static inline int __raw_spin_trylock(raw_spinlock_t *lock) | |||
59 | 52 | ||
60 | static inline void __raw_spin_unlock(raw_spinlock_t *lock) | 53 | static inline void __raw_spin_unlock(raw_spinlock_t *lock) |
61 | { | 54 | { |
62 | __asm__ __volatile__( | 55 | asm volatile("movl $1,%0" :"=m" (lock->slock) :: "memory"); |
63 | __raw_spin_unlock_string | ||
64 | ); | ||
65 | } | 56 | } |
66 | 57 | ||
67 | #define __raw_spin_unlock_wait(lock) \ | 58 | static inline void __raw_spin_unlock_wait(raw_spinlock_t *lock) |
68 | do { while (__raw_spin_is_locked(lock)) cpu_relax(); } while (0) | 59 | { |
60 | while (__raw_spin_is_locked(lock)) | ||
61 | cpu_relax(); | ||
62 | } | ||
69 | 63 | ||
70 | /* | 64 | /* |
71 | * Read-write spinlocks, allowing multiple readers | 65 | * Read-write spinlocks, allowing multiple readers |
@@ -79,26 +73,34 @@ static inline void __raw_spin_unlock(raw_spinlock_t *lock) | |||
79 | * | 73 | * |
80 | * On x86, we implement read-write locks as a 32-bit counter | 74 | * On x86, we implement read-write locks as a 32-bit counter |
81 | * with the high bit (sign) being the "contended" bit. | 75 | * with the high bit (sign) being the "contended" bit. |
82 | * | ||
83 | * The inline assembly is non-obvious. Think about it. | ||
84 | * | ||
85 | * Changed to use the same technique as rw semaphores. See | ||
86 | * semaphore.h for details. -ben | ||
87 | * | ||
88 | * the helpers are in arch/i386/kernel/semaphore.c | ||
89 | */ | 76 | */ |
90 | 77 | ||
91 | #define __raw_read_can_lock(x) ((int)(x)->lock > 0) | 78 | static inline int __raw_read_can_lock(raw_rwlock_t *lock) |
92 | #define __raw_write_can_lock(x) ((x)->lock == RW_LOCK_BIAS) | 79 | { |
80 | return (int)(lock)->lock > 0; | ||
81 | } | ||
82 | |||
83 | static inline int __raw_write_can_lock(raw_rwlock_t *lock) | ||
84 | { | ||
85 | return (lock)->lock == RW_LOCK_BIAS; | ||
86 | } | ||
93 | 87 | ||
94 | static inline void __raw_read_lock(raw_rwlock_t *rw) | 88 | static inline void __raw_read_lock(raw_rwlock_t *rw) |
95 | { | 89 | { |
96 | __build_read_lock(rw, "__read_lock_failed"); | 90 | asm volatile(LOCK_PREFIX "subl $1,(%0)\n\t" |
91 | "jns 1f\n" | ||
92 | "call __read_lock_failed\n" | ||
93 | "1:\n" | ||
94 | ::"D" (rw), "i" (RW_LOCK_BIAS) : "memory"); | ||
97 | } | 95 | } |
98 | 96 | ||
99 | static inline void __raw_write_lock(raw_rwlock_t *rw) | 97 | static inline void __raw_write_lock(raw_rwlock_t *rw) |
100 | { | 98 | { |
101 | __build_write_lock(rw, "__write_lock_failed"); | 99 | asm volatile(LOCK_PREFIX "subl %1,(%0)\n\t" |
100 | "jz 1f\n" | ||
101 | "\tcall __write_lock_failed\n\t" | ||
102 | "1:\n" | ||
103 | ::"D" (rw), "i" (RW_LOCK_BIAS) : "memory"); | ||
102 | } | 104 | } |
103 | 105 | ||
104 | static inline int __raw_read_trylock(raw_rwlock_t *lock) | 106 | static inline int __raw_read_trylock(raw_rwlock_t *lock) |