Diffstat (limited to 'arch/x86/include/asm')

 -rw-r--r--  arch/x86/include/asm/paravirt.h        | 14
 -rw-r--r--  arch/x86/include/asm/paravirt_types.h  | 14
 -rw-r--r--  arch/x86/include/asm/spinlock.h        | 62
 -rw-r--r--  arch/x86/include/asm/spinlock_types.h  | 10

 4 files changed, 50 insertions(+), 50 deletions(-)
diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
index efb38994859c..dd59a85a918f 100644
--- a/arch/x86/include/asm/paravirt.h
+++ b/arch/x86/include/asm/paravirt.h
@@ -731,34 +731,34 @@ static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
 
 #if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)
 
-static inline int __raw_spin_is_locked(struct raw_spinlock *lock)
+static inline int arch_spin_is_locked(struct arch_spinlock *lock)
 {
 	return PVOP_CALL1(int, pv_lock_ops.spin_is_locked, lock);
 }
 
-static inline int __raw_spin_is_contended(struct raw_spinlock *lock)
+static inline int arch_spin_is_contended(struct arch_spinlock *lock)
 {
 	return PVOP_CALL1(int, pv_lock_ops.spin_is_contended, lock);
 }
-#define __raw_spin_is_contended	__raw_spin_is_contended
+#define arch_spin_is_contended	arch_spin_is_contended
 
-static __always_inline void __raw_spin_lock(struct raw_spinlock *lock)
+static __always_inline void arch_spin_lock(struct arch_spinlock *lock)
 {
 	PVOP_VCALL1(pv_lock_ops.spin_lock, lock);
 }
 
-static __always_inline void __raw_spin_lock_flags(struct raw_spinlock *lock,
+static __always_inline void arch_spin_lock_flags(struct arch_spinlock *lock,
 						  unsigned long flags)
 {
 	PVOP_VCALL2(pv_lock_ops.spin_lock_flags, lock, flags);
 }
 
-static __always_inline int __raw_spin_trylock(struct raw_spinlock *lock)
+static __always_inline int arch_spin_trylock(struct arch_spinlock *lock)
 {
 	return PVOP_CALL1(int, pv_lock_ops.spin_trylock, lock);
 }
 
-static __always_inline void __raw_spin_unlock(struct raw_spinlock *lock)
+static __always_inline void arch_spin_unlock(struct arch_spinlock *lock)
 {
 	PVOP_VCALL1(pv_lock_ops.spin_unlock, lock);
 }
diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
index 9357473c8da0..b1e70d51e40c 100644
--- a/arch/x86/include/asm/paravirt_types.h
+++ b/arch/x86/include/asm/paravirt_types.h
@@ -318,14 +318,14 @@ struct pv_mmu_ops {
 			   phys_addr_t phys, pgprot_t flags);
 };
 
-struct raw_spinlock;
+struct arch_spinlock;
 struct pv_lock_ops {
-	int (*spin_is_locked)(struct raw_spinlock *lock);
-	int (*spin_is_contended)(struct raw_spinlock *lock);
-	void (*spin_lock)(struct raw_spinlock *lock);
-	void (*spin_lock_flags)(struct raw_spinlock *lock, unsigned long flags);
-	int (*spin_trylock)(struct raw_spinlock *lock);
-	void (*spin_unlock)(struct raw_spinlock *lock);
+	int (*spin_is_locked)(struct arch_spinlock *lock);
+	int (*spin_is_contended)(struct arch_spinlock *lock);
+	void (*spin_lock)(struct arch_spinlock *lock);
+	void (*spin_lock_flags)(struct arch_spinlock *lock, unsigned long flags);
+	int (*spin_trylock)(struct arch_spinlock *lock);
+	void (*spin_unlock)(struct arch_spinlock *lock);
 };
 
 /* This contains all the paravirt structures: we get a convenient
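The PVOP_CALL/PVOP_VCALL wrappers in paravirt.h dispatch through the pv_lock_ops table whose argument types this hunk renames; a paravirt backend such as Xen can install its own entries at boot while bare metal keeps the native ticket-lock functions. A minimal user-space sketch of that dispatch pattern follows — all names here are invented for illustration, not taken from the kernel:

#include <stdio.h>

/* Illustrative stand-in for arch_spinlock_t. */
struct demo_spinlock { unsigned int slock; };

/* Ops table mirroring the shape of pv_lock_ops. */
struct demo_lock_ops {
	void (*spin_lock)(struct demo_spinlock *lock);
	void (*spin_unlock)(struct demo_spinlock *lock);
};

static void native_lock(struct demo_spinlock *lock)   { lock->slock = 1; }
static void native_unlock(struct demo_spinlock *lock) { lock->slock = 0; }

/* A hypervisor backend would patch different functions in here at boot. */
static struct demo_lock_ops lock_ops = {
	.spin_lock   = native_lock,
	.spin_unlock = native_unlock,
};

/* Equivalent of the arch_spin_lock() wrapper: one indirect call. */
static inline void demo_spin_lock(struct demo_spinlock *lock)
{
	lock_ops.spin_lock(lock);
}

int main(void)
{
	struct demo_spinlock l = { 0 };

	demo_spin_lock(&l);
	printf("locked: %u\n", l.slock);
	lock_ops.spin_unlock(&l);
	return 0;
}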
diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h
index 4e77853321db..3089f70c0c52 100644
--- a/arch/x86/include/asm/spinlock.h
+++ b/arch/x86/include/asm/spinlock.h
@@ -58,7 +58,7 @@
 #if (NR_CPUS < 256)
 #define TICKET_SHIFT 8
 
-static __always_inline void __ticket_spin_lock(raw_spinlock_t *lock)
+static __always_inline void __ticket_spin_lock(arch_spinlock_t *lock)
 {
 	short inc = 0x0100;
 
@@ -77,7 +77,7 @@ static __always_inline void __ticket_spin_lock(raw_spinlock_t *lock)
 		     : "memory", "cc");
 }
 
-static __always_inline int __ticket_spin_trylock(raw_spinlock_t *lock)
+static __always_inline int __ticket_spin_trylock(arch_spinlock_t *lock)
 {
 	int tmp, new;
 
@@ -96,7 +96,7 @@ static __always_inline int __ticket_spin_trylock(raw_spinlock_t *lock)
 	return tmp;
 }
 
-static __always_inline void __ticket_spin_unlock(raw_spinlock_t *lock)
+static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
 {
 	asm volatile(UNLOCK_LOCK_PREFIX "incb %0"
 		     : "+m" (lock->slock)
@@ -106,7 +106,7 @@ static __always_inline void __ticket_spin_unlock(raw_spinlock_t *lock)
 #else
 #define TICKET_SHIFT 16
 
-static __always_inline void __ticket_spin_lock(raw_spinlock_t *lock)
+static __always_inline void __ticket_spin_lock(arch_spinlock_t *lock)
 {
 	int inc = 0x00010000;
 	int tmp;
@@ -127,7 +127,7 @@ static __always_inline void __ticket_spin_lock(raw_spinlock_t *lock)
 		     : "memory", "cc");
 }
 
-static __always_inline int __ticket_spin_trylock(raw_spinlock_t *lock)
+static __always_inline int __ticket_spin_trylock(arch_spinlock_t *lock)
 {
 	int tmp;
 	int new;
@@ -149,7 +149,7 @@ static __always_inline int __ticket_spin_trylock(raw_spinlock_t *lock)
 	return tmp;
 }
 
-static __always_inline void __ticket_spin_unlock(raw_spinlock_t *lock)
+static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
 {
 	asm volatile(UNLOCK_LOCK_PREFIX "incw %0"
 		     : "+m" (lock->slock)
@@ -158,14 +158,14 @@ static __always_inline void __ticket_spin_unlock(raw_spinlock_t *lock)
 }
 #endif
 
-static inline int __ticket_spin_is_locked(raw_spinlock_t *lock)
+static inline int __ticket_spin_is_locked(arch_spinlock_t *lock)
 {
 	int tmp = ACCESS_ONCE(lock->slock);
 
 	return !!(((tmp >> TICKET_SHIFT) ^ tmp) & ((1 << TICKET_SHIFT) - 1));
 }
 
-static inline int __ticket_spin_is_contended(raw_spinlock_t *lock)
+static inline int __ticket_spin_is_contended(arch_spinlock_t *lock)
 {
 	int tmp = ACCESS_ONCE(lock->slock);
 
@@ -174,43 +174,43 @@ static inline int __ticket_spin_is_contended(raw_spinlock_t *lock)
 
 #ifndef CONFIG_PARAVIRT_SPINLOCKS
 
-static inline int __raw_spin_is_locked(raw_spinlock_t *lock)
+static inline int arch_spin_is_locked(arch_spinlock_t *lock)
 {
 	return __ticket_spin_is_locked(lock);
 }
 
-static inline int __raw_spin_is_contended(raw_spinlock_t *lock)
+static inline int arch_spin_is_contended(arch_spinlock_t *lock)
 {
 	return __ticket_spin_is_contended(lock);
 }
-#define __raw_spin_is_contended	__raw_spin_is_contended
+#define arch_spin_is_contended	arch_spin_is_contended
 
-static __always_inline void __raw_spin_lock(raw_spinlock_t *lock)
+static __always_inline void arch_spin_lock(arch_spinlock_t *lock)
 {
 	__ticket_spin_lock(lock);
 }
 
-static __always_inline int __raw_spin_trylock(raw_spinlock_t *lock)
+static __always_inline int arch_spin_trylock(arch_spinlock_t *lock)
 {
 	return __ticket_spin_trylock(lock);
 }
 
-static __always_inline void __raw_spin_unlock(raw_spinlock_t *lock)
+static __always_inline void arch_spin_unlock(arch_spinlock_t *lock)
 {
 	__ticket_spin_unlock(lock);
 }
 
-static __always_inline void __raw_spin_lock_flags(raw_spinlock_t *lock,
+static __always_inline void arch_spin_lock_flags(arch_spinlock_t *lock,
 						  unsigned long flags)
 {
-	__raw_spin_lock(lock);
+	arch_spin_lock(lock);
 }
 
 #endif	/* CONFIG_PARAVIRT_SPINLOCKS */
 
-static inline void __raw_spin_unlock_wait(raw_spinlock_t *lock)
+static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
 {
-	while (__raw_spin_is_locked(lock))
+	while (arch_spin_is_locked(lock))
 		cpu_relax();
 }
 
@@ -232,7 +232,7 @@ static inline void __raw_spin_unlock_wait(raw_spinlock_t *lock)
  * read_can_lock - would read_trylock() succeed?
  * @lock: the rwlock in question.
  */
-static inline int __raw_read_can_lock(raw_rwlock_t *lock)
+static inline int arch_read_can_lock(arch_rwlock_t *lock)
 {
 	return (int)(lock)->lock > 0;
 }
@@ -241,12 +241,12 @@ static inline int __raw_read_can_lock(raw_rwlock_t *lock)
  * write_can_lock - would write_trylock() succeed?
 * @lock: the rwlock in question.
 */
-static inline int __raw_write_can_lock(raw_rwlock_t *lock)
+static inline int arch_write_can_lock(arch_rwlock_t *lock)
 {
 	return (lock)->lock == RW_LOCK_BIAS;
 }
 
-static inline void __raw_read_lock(raw_rwlock_t *rw)
+static inline void arch_read_lock(arch_rwlock_t *rw)
 {
 	asm volatile(LOCK_PREFIX " subl $1,(%0)\n\t"
 		     "jns 1f\n"
@@ -255,7 +255,7 @@ static inline void __raw_read_lock(raw_rwlock_t *rw)
 		     ::LOCK_PTR_REG (rw) : "memory");
 }
 
-static inline void __raw_write_lock(raw_rwlock_t *rw)
+static inline void arch_write_lock(arch_rwlock_t *rw)
 {
 	asm volatile(LOCK_PREFIX " subl %1,(%0)\n\t"
 		     "jz 1f\n"
@@ -264,7 +264,7 @@ static inline void __raw_write_lock(raw_rwlock_t *rw)
 		     ::LOCK_PTR_REG (rw), "i" (RW_LOCK_BIAS) : "memory");
 }
 
-static inline int __raw_read_trylock(raw_rwlock_t *lock)
+static inline int arch_read_trylock(arch_rwlock_t *lock)
 {
 	atomic_t *count = (atomic_t *)lock;
 
@@ -274,7 +274,7 @@ static inline int __raw_read_trylock(raw_rwlock_t *lock)
 	return 0;
 }
 
-static inline int __raw_write_trylock(raw_rwlock_t *lock)
+static inline int arch_write_trylock(arch_rwlock_t *lock)
 {
 	atomic_t *count = (atomic_t *)lock;
 
@@ -284,23 +284,23 @@ static inline int __raw_write_trylock(raw_rwlock_t *lock)
 	return 0;
 }
 
-static inline void __raw_read_unlock(raw_rwlock_t *rw)
+static inline void arch_read_unlock(arch_rwlock_t *rw)
 {
 	asm volatile(LOCK_PREFIX "incl %0" :"+m" (rw->lock) : : "memory");
 }
 
-static inline void __raw_write_unlock(raw_rwlock_t *rw)
+static inline void arch_write_unlock(arch_rwlock_t *rw)
 {
 	asm volatile(LOCK_PREFIX "addl %1, %0"
 		     : "+m" (rw->lock) : "i" (RW_LOCK_BIAS) : "memory");
 }
 
-#define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock)
-#define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock)
+#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
+#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
 
-#define _raw_spin_relax(lock)	cpu_relax()
-#define _raw_read_relax(lock)	cpu_relax()
-#define _raw_write_relax(lock)	cpu_relax()
+#define arch_spin_relax(lock)	cpu_relax()
+#define arch_read_relax(lock)	cpu_relax()
+#define arch_write_relax(lock)	cpu_relax()
 
 /* The {read|write|spin}_lock() on x86 are full memory barriers. */
 static inline void smp_mb__after_lock(void) { }
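The __ticket_spin_* functions renamed above implement a ticket lock: slock packs a "currently serving" head in its low TICKET_SHIFT bits and a "next ticket" tail above it, so __ticket_spin_is_locked() reports the lock held exactly when head != tail. A rough C11 rendering of the NR_CPUS < 256 variant, using atomic builtins in place of the kernel's inline asm — a sketch for illustration, not the kernel's code:

#include <stdatomic.h>

#define TICKET_SHIFT 8	/* NR_CPUS < 256: one byte each for head and tail */

typedef struct { _Atomic unsigned int slock; } demo_spinlock_t;

static void demo_ticket_lock(demo_spinlock_t *lock)
{
	/* The "lock xaddw" in the asm: grab a ticket by bumping the tail. */
	unsigned int prev = atomic_fetch_add(&lock->slock, 1u << TICKET_SHIFT);
	unsigned int my_ticket = (prev >> TICKET_SHIFT) & 0xff;

	/* Spin until the head (low byte) reaches our ticket. */
	while ((atomic_load(&lock->slock) & 0xff) != my_ticket)
		;	/* the kernel inserts cpu_relax() ("rep; nop") here */
}

static void demo_ticket_unlock(demo_spinlock_t *lock)
{
	/* "incb %0" bumps only the low byte; emulate the byte wrap with CAS. */
	unsigned int old = atomic_load(&lock->slock);
	unsigned int new;

	do {
		new = (old & ~0xffu) | ((old + 1) & 0xffu);
	} while (!atomic_compare_exchange_weak(&lock->slock, &old, new));
}

static int demo_ticket_is_locked(demo_spinlock_t *lock)
{
	unsigned int tmp = atomic_load(&lock->slock);

	/* Held iff head != tail -- the same test as __ticket_spin_is_locked(). */
	return !!(((tmp >> TICKET_SHIFT) ^ tmp) & ((1 << TICKET_SHIFT) - 1));
}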
diff --git a/arch/x86/include/asm/spinlock_types.h b/arch/x86/include/asm/spinlock_types.h
index 845f81c87091..dcb48b2edc11 100644
--- a/arch/x86/include/asm/spinlock_types.h
+++ b/arch/x86/include/asm/spinlock_types.h
@@ -5,16 +5,16 @@
 # error "please don't include this file directly"
 #endif
 
-typedef struct raw_spinlock {
+typedef struct arch_spinlock {
 	unsigned int slock;
-} raw_spinlock_t;
+} arch_spinlock_t;
 
-#define __RAW_SPIN_LOCK_UNLOCKED	{ 0 }
+#define __ARCH_SPIN_LOCK_UNLOCKED	{ 0 }
 
 typedef struct {
 	unsigned int lock;
-} raw_rwlock_t;
+} arch_rwlock_t;
 
-#define __RAW_RW_LOCK_UNLOCKED		{ RW_LOCK_BIAS }
+#define __ARCH_RW_LOCK_UNLOCKED		{ RW_LOCK_BIAS }
 
 #endif /* _ASM_X86_SPINLOCK_TYPES_H */
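__ARCH_RW_LOCK_UNLOCKED starts an rwlock at RW_LOCK_BIAS (0x01000000 on x86 of this era): each reader subtracts 1 and a writer subtracts the whole bias, which is why arch_read_can_lock() tests lock > 0 and arch_write_can_lock() tests lock == RW_LOCK_BIAS. A single-threaded sketch of that counting scheme — illustration only, since real locking needs the atomic asm shown in spinlock.h:

#include <stdio.h>

#define RW_LOCK_BIAS 0x01000000	/* x86 value at the time of this patch */

int main(void)
{
	int lock = RW_LOCK_BIAS;	/* __ARCH_RW_LOCK_UNLOCKED */

	lock -= 1;			/* arch_read_lock(): one reader in */
	printf("reader in : write_can_lock=%d read_can_lock=%d\n",
	       lock == RW_LOCK_BIAS, lock > 0);
	lock += 1;			/* arch_read_unlock() */

	lock -= RW_LOCK_BIAS;		/* arch_write_lock(): claim the whole bias */
	printf("writer in : write_can_lock=%d read_can_lock=%d\n",
	       lock == RW_LOCK_BIAS, lock > 0);
	lock += RW_LOCK_BIAS;		/* arch_write_unlock() */
	return 0;
}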