author		Joe Perches <joe@perches.com>	2008-03-23 04:03:31 -0400
committer	Ingo Molnar <mingo@elte.hu>	2008-04-17 11:41:27 -0400
commit		d3bf60a6e48c9a451cac345c0ad57552bb299992 (patch)
tree		c1020e7c6a8f38e78e71e079dffa2f91bb1a6765 /include/asm-x86/spinlock.h
parent		ceb7ce1052a9087bd4752424f253b883ec5e1cec (diff)
include/asm-x86/spinlock.h: checkpatch cleanups - formatting only
Signed-off-by: Joe Perches <joe@perches.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'include/asm-x86/spinlock.h')
-rw-r--r--	include/asm-x86/spinlock.h	105
1 file changed, 50 insertions, 55 deletions
diff --git a/include/asm-x86/spinlock.h b/include/asm-x86/spinlock.h
index 23804c1890ff..47dfe2607bb1 100644
--- a/include/asm-x86/spinlock.h
+++ b/include/asm-x86/spinlock.h
@@ -82,7 +82,7 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock)
 {
 	short inc = 0x0100;
 
-	__asm__ __volatile__ (
+	asm volatile (
 		LOCK_PREFIX "xaddw %w0, %1\n"
 		"1:\t"
 		"cmpb %h0, %b0\n\t"
@@ -92,9 +92,9 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock)
 		/* don't need lfence here, because loads are in-order */
 		"jmp 1b\n"
 		"2:"
-		:"+Q" (inc), "+m" (lock->slock)
+		: "+Q" (inc), "+m" (lock->slock)
 		:
-		:"memory", "cc");
+		: "memory", "cc");
 }
 
 #define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)
@@ -104,30 +104,28 @@ static inline int __raw_spin_trylock(raw_spinlock_t *lock)
 	int tmp;
 	short new;
 
-	asm volatile(
-		"movw %2,%w0\n\t"
-		"cmpb %h0,%b0\n\t"
-		"jne 1f\n\t"
-		"movw %w0,%w1\n\t"
-		"incb %h1\n\t"
-		"lock ; cmpxchgw %w1,%2\n\t"
-		"1:"
-		"sete %b1\n\t"
-		"movzbl %b1,%0\n\t"
-		:"=&a" (tmp), "=Q" (new), "+m" (lock->slock)
-		:
-		: "memory", "cc");
+	asm volatile("movw %2,%w0\n\t"
+		     "cmpb %h0,%b0\n\t"
+		     "jne 1f\n\t"
+		     "movw %w0,%w1\n\t"
+		     "incb %h1\n\t"
+		     "lock ; cmpxchgw %w1,%2\n\t"
+		     "1:"
+		     "sete %b1\n\t"
+		     "movzbl %b1,%0\n\t"
+		     : "=&a" (tmp), "=Q" (new), "+m" (lock->slock)
+		     :
+		     : "memory", "cc");
 
 	return tmp;
 }
 
 static inline void __raw_spin_unlock(raw_spinlock_t *lock)
 {
-	__asm__ __volatile__(
-		UNLOCK_LOCK_PREFIX "incb %0"
-		:"+m" (lock->slock)
-		:
-		:"memory", "cc");
+	asm volatile(UNLOCK_LOCK_PREFIX "incb %0"
+		     : "+m" (lock->slock)
+		     :
+		     : "memory", "cc");
 }
 #else
 static inline int __raw_spin_is_locked(raw_spinlock_t *lock)
@@ -149,21 +147,20 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock)
 	int inc = 0x00010000;
 	int tmp;
 
-	__asm__ __volatile__ (
-		"lock ; xaddl %0, %1\n"
-		"movzwl %w0, %2\n\t"
-		"shrl $16, %0\n\t"
-		"1:\t"
-		"cmpl %0, %2\n\t"
-		"je 2f\n\t"
-		"rep ; nop\n\t"
-		"movzwl %1, %2\n\t"
-		/* don't need lfence here, because loads are in-order */
-		"jmp 1b\n"
-		"2:"
-		:"+Q" (inc), "+m" (lock->slock), "=r" (tmp)
-		:
-		:"memory", "cc");
+	asm volatile("lock ; xaddl %0, %1\n"
+		     "movzwl %w0, %2\n\t"
+		     "shrl $16, %0\n\t"
+		     "1:\t"
+		     "cmpl %0, %2\n\t"
+		     "je 2f\n\t"
+		     "rep ; nop\n\t"
+		     "movzwl %1, %2\n\t"
+		     /* don't need lfence here, because loads are in-order */
+		     "jmp 1b\n"
+		     "2:"
+		     : "+Q" (inc), "+m" (lock->slock), "=r" (tmp)
+		     :
+		     : "memory", "cc");
 }
 
 #define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)
@@ -173,31 +170,29 @@ static inline int __raw_spin_trylock(raw_spinlock_t *lock)
 	int tmp;
 	int new;
 
-	asm volatile(
-		"movl %2,%0\n\t"
-		"movl %0,%1\n\t"
-		"roll $16, %0\n\t"
-		"cmpl %0,%1\n\t"
-		"jne 1f\n\t"
-		"addl $0x00010000, %1\n\t"
-		"lock ; cmpxchgl %1,%2\n\t"
-		"1:"
-		"sete %b1\n\t"
-		"movzbl %b1,%0\n\t"
-		:"=&a" (tmp), "=r" (new), "+m" (lock->slock)
-		:
-		: "memory", "cc");
+	asm volatile("movl %2,%0\n\t"
+		     "movl %0,%1\n\t"
+		     "roll $16, %0\n\t"
+		     "cmpl %0,%1\n\t"
+		     "jne 1f\n\t"
+		     "addl $0x00010000, %1\n\t"
+		     "lock ; cmpxchgl %1,%2\n\t"
+		     "1:"
+		     "sete %b1\n\t"
+		     "movzbl %b1,%0\n\t"
+		     : "=&a" (tmp), "=r" (new), "+m" (lock->slock)
+		     :
+		     : "memory", "cc");
 
 	return tmp;
 }
 
 static inline void __raw_spin_unlock(raw_spinlock_t *lock)
 {
-	__asm__ __volatile__(
-		UNLOCK_LOCK_PREFIX "incw %0"
-		:"+m" (lock->slock)
-		:
-		:"memory", "cc");
+	asm volatile(UNLOCK_LOCK_PREFIX "incw %0"
+		     : "+m" (lock->slock)
+		     :
+		     : "memory", "cc");
 }
 #endif
 
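For readers who want to follow what the inline assembly above actually does, here is a minimal user-space C sketch of the ticket-lock protocol behind the NR_CPUS < 256 variant: a 16-bit slock whose low byte is the ticket currently being served and whose high byte is the next ticket to hand out. This is an illustration only, not the kernel's code: the ticket_lock_t type and the ticket_lock/ticket_trylock/ticket_unlock names are invented for this sketch, GCC __sync builtins stand in for the hand-coded LOCK-prefixed xaddw/cmpxchgw/incb instructions, and x86 little-endian byte order is assumed.

/*
 * Illustrative sketch of the ticket lock implemented by the asm above
 * for NR_CPUS < 256; names are invented and __sync builtins replace
 * the hand-written LOCK-prefixed instructions.
 */
#include <stdint.h>
#include <stdio.h>

typedef struct {
	/* low byte: ticket now being served; high byte: next free ticket */
	volatile uint16_t slock;
} ticket_lock_t;

static void ticket_lock(ticket_lock_t *lock)
{
	/* Grab a ticket: atomically add 0x0100 (bump the high byte) and
	 * read back the previous value, like "lock ; xaddw". */
	uint16_t old = __sync_fetch_and_add(&lock->slock, 0x0100);
	uint8_t my_ticket = (uint8_t)(old >> 8);

	/* Spin until the owner byte reaches our ticket ("cmpb %h0,%b0");
	 * the real code inserts "rep ; nop" (PAUSE) in this loop. */
	while ((uint8_t)(lock->slock & 0xff) != my_ticket)
		;
}

static int ticket_trylock(ticket_lock_t *lock)
{
	uint16_t old = lock->slock;

	/* The lock is free only when owner and next-ticket bytes match. */
	if ((uint8_t)(old >> 8) != (uint8_t)(old & 0xff))
		return 0;

	/* Take it by bumping the next-ticket byte, but only if nobody
	 * raced us in the meantime ("lock ; cmpxchgw"). */
	return __sync_bool_compare_and_swap(&lock->slock, old,
					    (uint16_t)(old + 0x0100));
}

static void ticket_unlock(ticket_lock_t *lock)
{
	/* Serve the next waiter by bumping only the owner byte, as the
	 * kernel's "incb %0" does; on little-endian x86 the owner byte
	 * is the first byte of slock. */
	__sync_fetch_and_add((volatile uint8_t *)&lock->slock, 1);
}

int main(void)
{
	ticket_lock_t lock = { 0 };

	ticket_lock(&lock);
	printf("trylock while held: %d\n", ticket_trylock(&lock)); /* 0 */
	ticket_unlock(&lock);
	printf("trylock when free:  %d\n", ticket_trylock(&lock)); /* 1 */
	return 0;
}

The property the xaddw-based fast path preserves is FIFO fairness: tickets are handed out in arrival order, and unlock advances only the owner byte, so waiters acquire the lock in the order they requested it. The checkpatch changes in this commit do not touch any of that logic; they only reformat the asm statements.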