author		Andi Kleen <ak@suse.de>			2006-09-26 04:52:32 -0400
committer	Andi Kleen <andi@basil.nowhere.org>	2006-09-26 04:52:32 -0400
commit		fb2e28485679418e459583605f9b19807a72ceca (patch)
tree		c3e6bf7f75fb9c6ed286ef09eebf281388761cfe /include/asm-i386/spinlock.h
parent		8b059d2373c16b6d32787a49daf8ccf72dc61b71 (diff)
[PATCH] i386: Clean up spin/rwlocks
- Inline spinlock strings into their inline functions
- Convert macros to typesafe inlines
- Replace some leftover __asm__ __volatile__s with asm volatile
Signed-off-by: Andi Kleen <ak@suse.de>
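
The typesafe-inline conversion is the interesting part of the cleanup: a
macro accepts any argument that happens to have the right field, while a
static inline is checked against the declared lock type and still compiles
to the same code. As a before/after sketch taken from the first hunk below:

	/* before: compiles for anything with a ->slock member */
	#define __raw_spin_is_locked(x) \
		(*(volatile signed char *)(&(x)->slock) <= 0)

	/* after: passing anything but a raw_spinlock_t * is a compile
	 * error, and the generated code is identical */
	static inline int __raw_spin_is_locked(raw_spinlock_t *x)
	{
		return *(volatile signed char *)(&(x)->slock) <= 0;
	}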
Diffstat (limited to 'include/asm-i386/spinlock.h')
-rw-r--r--	include/asm-i386/spinlock.h	131
1 file changed, 67 insertions, 64 deletions
diff --git a/include/asm-i386/spinlock.h b/include/asm-i386/spinlock.h
index d1020363c41a..324329313af8 100644
--- a/include/asm-i386/spinlock.h
+++ b/include/asm-i386/spinlock.h
@@ -4,6 +4,7 @@
 #include <asm/atomic.h>
 #include <asm/rwlock.h>
 #include <asm/page.h>
+#include <asm/processor.h>
 #include <linux/compiler.h>
 
 /*
@@ -17,67 +18,64 @@
  * (the type definitions are in asm/spinlock_types.h)
  */
 
-#define __raw_spin_is_locked(x) \
-		(*(volatile signed char *)(&(x)->slock) <= 0)
-
-#define __raw_spin_lock_string \
-	"\n1:\t" \
-	LOCK_PREFIX " ; decb %0\n\t" \
-	"jns 3f\n" \
-	"2:\t" \
-	"rep;nop\n\t" \
-	"cmpb $0,%0\n\t" \
-	"jle 2b\n\t" \
-	"jmp 1b\n" \
-	"3:\n\t"
-
-/*
- * NOTE: there's an irqs-on section here, which normally would have to be
- * irq-traced, but on CONFIG_TRACE_IRQFLAGS we never use
- * __raw_spin_lock_string_flags().
- */
-#define __raw_spin_lock_string_flags \
-	"\n1:\t" \
-	LOCK_PREFIX " ; decb %0\n\t" \
-	"jns 5f\n" \
-	"2:\t" \
-	"testl $0x200, %1\n\t" \
-	"jz 4f\n\t" \
-	"sti\n" \
-	"3:\t" \
-	"rep;nop\n\t" \
-	"cmpb $0, %0\n\t" \
-	"jle 3b\n\t" \
-	"cli\n\t" \
-	"jmp 1b\n" \
-	"4:\t" \
-	"rep;nop\n\t" \
-	"cmpb $0, %0\n\t" \
-	"jg 1b\n\t" \
-	"jmp 4b\n" \
-	"5:\n\t"
+static inline int __raw_spin_is_locked(raw_spinlock_t *x)
+{
+	return *(volatile signed char *)(&(x)->slock) <= 0;
+}
 
 static inline void __raw_spin_lock(raw_spinlock_t *lock)
 {
-	asm(__raw_spin_lock_string : "+m" (lock->slock) : : "memory");
+	asm volatile("\n1:\t"
+		     LOCK_PREFIX " ; decb %0\n\t"
+		     "jns 3f\n"
+		     "2:\t"
+		     "rep;nop\n\t"
+		     "cmpb $0,%0\n\t"
+		     "jle 2b\n\t"
+		     "jmp 1b\n"
+		     "3:\n\t"
+		     : "+m" (lock->slock) : : "memory");
 }
 
 /*
  * It is easier for the lock validator if interrupts are not re-enabled
  * in the middle of a lock-acquire. This is a performance feature anyway
  * so we turn it off:
+ *
+ * NOTE: there's an irqs-on section here, which normally would have to be
+ * irq-traced, but on CONFIG_TRACE_IRQFLAGS we never use this variant.
  */
 #ifndef CONFIG_PROVE_LOCKING
 static inline void __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags)
 {
-	asm(__raw_spin_lock_string_flags : "+m" (lock->slock) : "r" (flags) : "memory");
+	asm volatile(
+		"\n1:\t"
+		LOCK_PREFIX " ; decb %0\n\t"
+		"jns 5f\n"
+		"2:\t"
+		"testl $0x200, %1\n\t"
+		"jz 4f\n\t"
+		"sti\n"
+		"3:\t"
+		"rep;nop\n\t"
+		"cmpb $0, %0\n\t"
+		"jle 3b\n\t"
+		"cli\n\t"
+		"jmp 1b\n"
+		"4:\t"
+		"rep;nop\n\t"
+		"cmpb $0, %0\n\t"
+		"jg 1b\n\t"
+		"jmp 4b\n"
+		"5:\n\t"
+		: "+m" (lock->slock) : "r" (flags) : "memory");
 }
 #endif
 
 static inline int __raw_spin_trylock(raw_spinlock_t *lock)
 {
 	char oldval;
-	__asm__ __volatile__(
+	asm volatile(
 		"xchgb %b0,%1"
 		:"=q" (oldval), "+m" (lock->slock)
 		:"0" (0) : "memory");
@@ -93,38 +91,29 @@ static inline int __raw_spin_trylock(raw_spinlock_t *lock)
 
 #if !defined(CONFIG_X86_OOSTORE) && !defined(CONFIG_X86_PPRO_FENCE)
 
-#define __raw_spin_unlock_string \
-	"movb $1,%0" \
-		:"+m" (lock->slock) : : "memory"
-
-
 static inline void __raw_spin_unlock(raw_spinlock_t *lock)
 {
-	__asm__ __volatile__(
-		__raw_spin_unlock_string
-	);
+	asm volatile("movb $1,%0" : "+m" (lock->slock) :: "memory");
 }
 
 #else
 
-#define __raw_spin_unlock_string \
-	"xchgb %b0, %1" \
-		:"=q" (oldval), "+m" (lock->slock) \
-		:"0" (oldval) : "memory"
-
 static inline void __raw_spin_unlock(raw_spinlock_t *lock)
 {
 	char oldval = 1;
 
-	__asm__ __volatile__(
-		__raw_spin_unlock_string
-	);
+	asm volatile("xchgb %b0, %1"
+		     : "=q" (oldval), "+m" (lock->slock)
+		     : "0" (oldval) : "memory");
 }
 
 #endif
 
-#define __raw_spin_unlock_wait(lock) \
-	do { while (__raw_spin_is_locked(lock)) cpu_relax(); } while (0)
+static inline void __raw_spin_unlock_wait(raw_spinlock_t *lock)
+{
+	while (__raw_spin_is_locked(lock))
+		cpu_relax();
+}
 
 /*
  * Read-write spinlocks, allowing multiple readers
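
Two unlock flavours survive the cleanup: on processors with ordered stores
a plain movb $1 is enough to release the lock, while the
CONFIG_X86_OOSTORE/CONFIG_X86_PPRO_FENCE build uses xchgb, which carries an
implicit LOCK and therefore also acts as a full barrier. The new
asm/processor.h include at the top of the file is presumably what supplies
cpu_relax() now that __raw_spin_unlock_wait() is a real function in this
header. The same two release idioms in the model from above (again
illustrative; MODEL_STRICT_UNLOCK stands in for the OOSTORE/PPRO_FENCE
test):

	static inline void model_spin_unlock(model_spinlock_t *lock)
	{
	#ifdef MODEL_STRICT_UNLOCK
		/* xchgb: implicitly LOCKed, a full memory barrier */
		__atomic_exchange_n(&lock->slock, 1, __ATOMIC_SEQ_CST);
	#else
		/* movb $1,%0: a plain release store suffices */
		__atomic_store_n(&lock->slock, 1, __ATOMIC_RELEASE);
	#endif
	}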
@@ -151,22 +140,36 @@ static inline void __raw_spin_unlock(raw_spinlock_t *lock)
  * read_can_lock - would read_trylock() succeed?
  * @lock: the rwlock in question.
  */
-#define __raw_read_can_lock(x)	((int)(x)->lock > 0)
+static inline int __raw_read_can_lock(raw_rwlock_t *x)
+{
+	return (int)(x)->lock > 0;
+}
 
 /**
  * write_can_lock - would write_trylock() succeed?
  * @lock: the rwlock in question.
  */
-#define __raw_write_can_lock(x)	((x)->lock == RW_LOCK_BIAS)
+static inline int __raw_write_can_lock(raw_rwlock_t *x)
+{
+	return (x)->lock == RW_LOCK_BIAS;
+}
 
 static inline void __raw_read_lock(raw_rwlock_t *rw)
 {
-	__build_read_lock(rw, "__read_lock_failed");
+	asm volatile(LOCK_PREFIX " subl $1,(%0)\n\t"
+		     "jns 1f\n"
+		     "call __read_lock_failed\n\t"
+		     "1:\n"
+		     ::"a" (rw) : "memory");
 }
 
 static inline void __raw_write_lock(raw_rwlock_t *rw)
 {
-	__build_write_lock(rw, "__write_lock_failed");
+	asm volatile(LOCK_PREFIX " subl $" RW_LOCK_BIAS_STR ",(%0)\n\t"
+		     "jz 1f\n"
+		     "call __write_lock_failed\n\t"
+		     "1:\n"
+		     ::"a" (rw) : "memory");
 }
 
 static inline int __raw_read_trylock(raw_rwlock_t *lock)
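
The rwlocks use the RW_LOCK_BIAS counting scheme: the lock word starts at
RW_LOCK_BIAS (0x01000000 on i386), each reader subtracts 1, and a writer
subtracts the entire bias, so the fast path only has to test the sign
(readers) or zero (writer) of the result; contended paths fall through to
the out-of-line __read_lock_failed/__write_lock_failed helpers. A rough C
model of the fast and slow paths (illustrative only; the model_* names and
the inlined slow-path loops are this note's reading of those helpers):

	#define MODEL_RW_LOCK_BIAS 0x01000000
	typedef struct { volatile int lock; } model_rwlock_t;

	static inline void model_read_lock(model_rwlock_t *rw)
	{
		/* LOCK; subl $1,(%0) / jns 1f: still >= 0 unless a writer
		 * holds or is taking the lock */
		while (__atomic_sub_fetch(&rw->lock, 1, __ATOMIC_ACQUIRE) < 0) {
			/* slow path: back out, wait for the writer, retry */
			__atomic_add_fetch(&rw->lock, 1, __ATOMIC_RELAXED);
			while (__atomic_load_n(&rw->lock, __ATOMIC_RELAXED) <= 0)
				__builtin_ia32_pause();
		}
	}

	static inline void model_write_lock(model_rwlock_t *rw)
	{
		/* LOCK; subl $RW_LOCK_BIAS,(%0) / jz 1f: drops exactly to
		 * zero only when nobody else held the lock */
		while (__atomic_sub_fetch(&rw->lock, MODEL_RW_LOCK_BIAS,
					  __ATOMIC_ACQUIRE) != 0) {
			/* slow path: back out, wait until fully free, retry */
			__atomic_add_fetch(&rw->lock, MODEL_RW_LOCK_BIAS,
					   __ATOMIC_RELAXED);
			while (__atomic_load_n(&rw->lock, __ATOMIC_RELAXED)
			       != MODEL_RW_LOCK_BIAS)
				__builtin_ia32_pause();
		}
	}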