Diffstat (limited to 'include/asm-x86/spinlock.h')
-rw-r--r--	include/asm-x86/spinlock.h | 79
1 file changed, 37 insertions(+), 42 deletions(-)
diff --git a/include/asm-x86/spinlock.h b/include/asm-x86/spinlock.h
index e39c790dbfd2..157ff7fab97a 100644
--- a/include/asm-x86/spinlock.h
+++ b/include/asm-x86/spinlock.h
@@ -1,5 +1,5 @@
-#ifndef _X86_SPINLOCK_H_
-#define _X86_SPINLOCK_H_
+#ifndef ASM_X86__SPINLOCK_H
+#define ASM_X86__SPINLOCK_H
 
 #include <asm/atomic.h>
 #include <asm/rwlock.h>
@@ -21,8 +21,10 @@
 
 #ifdef CONFIG_X86_32
 # define LOCK_PTR_REG "a"
+# define REG_PTR_MODE "k"
 #else
 # define LOCK_PTR_REG "D"
+# define REG_PTR_MODE "q"
 #endif
 
 #if defined(CONFIG_X86_32) && \
@@ -54,19 +56,7 @@
  * much between them in performance though, especially as locks are out of line.
  */
 #if (NR_CPUS < 256)
-static inline int __ticket_spin_is_locked(raw_spinlock_t *lock)
-{
-	int tmp = ACCESS_ONCE(lock->slock);
-
-	return (((tmp >> 8) & 0xff) != (tmp & 0xff));
-}
-
-static inline int __ticket_spin_is_contended(raw_spinlock_t *lock)
-{
-	int tmp = ACCESS_ONCE(lock->slock);
-
-	return (((tmp >> 8) - tmp) & 0xff) > 1;
-}
+#define TICKET_SHIFT 8
 
 static __always_inline void __ticket_spin_lock(raw_spinlock_t *lock)
 {
@@ -89,19 +79,17 @@ static __always_inline void __ticket_spin_lock(raw_spinlock_t *lock)
 
 static __always_inline int __ticket_spin_trylock(raw_spinlock_t *lock)
 {
-	int tmp;
-	short new;
+	int tmp, new;
 
-	asm volatile("movw %2,%w0\n\t"
+	asm volatile("movzwl %2, %0\n\t"
		     "cmpb %h0,%b0\n\t"
+		     "leal 0x100(%" REG_PTR_MODE "0), %1\n\t"
		     "jne 1f\n\t"
-		     "movw %w0,%w1\n\t"
-		     "incb %h1\n\t"
-		     "lock ; cmpxchgw %w1,%2\n\t"
+		     LOCK_PREFIX "cmpxchgw %w1,%2\n\t"
		     "1:"
		     "sete %b1\n\t"
		     "movzbl %b1,%0\n\t"
-		     : "=&a" (tmp), "=Q" (new), "+m" (lock->slock)
+		     : "=&a" (tmp), "=&q" (new), "+m" (lock->slock)
		     :
		     : "memory", "cc");
 
@@ -116,26 +104,14 @@ static __always_inline void __ticket_spin_unlock(raw_spinlock_t *lock)
		     : "memory", "cc");
 }
 #else
-static inline int __ticket_spin_is_locked(raw_spinlock_t *lock)
-{
-	int tmp = ACCESS_ONCE(lock->slock);
-
-	return (((tmp >> 16) & 0xffff) != (tmp & 0xffff));
-}
-
-static inline int __ticket_spin_is_contended(raw_spinlock_t *lock)
-{
-	int tmp = ACCESS_ONCE(lock->slock);
-
-	return (((tmp >> 16) - tmp) & 0xffff) > 1;
-}
+#define TICKET_SHIFT 16
 
 static __always_inline void __ticket_spin_lock(raw_spinlock_t *lock)
 {
 	int inc = 0x00010000;
 	int tmp;
 
-	asm volatile("lock ; xaddl %0, %1\n"
+	asm volatile(LOCK_PREFIX "xaddl %0, %1\n"
		     "movzwl %w0, %2\n\t"
		     "shrl $16, %0\n\t"
		     "1:\t"
@@ -146,7 +122,7 @@ static __always_inline void __ticket_spin_lock(raw_spinlock_t *lock)
		     /* don't need lfence here, because loads are in-order */
		     "jmp 1b\n"
		     "2:"
-		     : "+Q" (inc), "+m" (lock->slock), "=r" (tmp)
+		     : "+r" (inc), "+m" (lock->slock), "=&r" (tmp)
		     :
		     : "memory", "cc");
 }
@@ -160,13 +136,13 @@ static __always_inline int __ticket_spin_trylock(raw_spinlock_t *lock)
		     "movl %0,%1\n\t"
		     "roll $16, %0\n\t"
		     "cmpl %0,%1\n\t"
+		     "leal 0x00010000(%" REG_PTR_MODE "0), %1\n\t"
		     "jne 1f\n\t"
-		     "addl $0x00010000, %1\n\t"
-		     "lock ; cmpxchgl %1,%2\n\t"
+		     LOCK_PREFIX "cmpxchgl %1,%2\n\t"
		     "1:"
		     "sete %b1\n\t"
		     "movzbl %b1,%0\n\t"
-		     : "=&a" (tmp), "=r" (new), "+m" (lock->slock)
+		     : "=&a" (tmp), "=&q" (new), "+m" (lock->slock)
		     :
		     : "memory", "cc");
 
@@ -182,7 +158,19 @@ static __always_inline void __ticket_spin_unlock(raw_spinlock_t *lock)
 }
 #endif
 
-#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)
+static inline int __ticket_spin_is_locked(raw_spinlock_t *lock)
+{
+	int tmp = ACCESS_ONCE(lock->slock);
+
+	return !!(((tmp >> TICKET_SHIFT) ^ tmp) & ((1 << TICKET_SHIFT) - 1));
+}
+
+static inline int __ticket_spin_is_contended(raw_spinlock_t *lock)
+{
+	int tmp = ACCESS_ONCE(lock->slock);
+
+	return (((tmp >> TICKET_SHIFT) - tmp) & ((1 << TICKET_SHIFT) - 1)) > 1;
+}
 
 #ifdef CONFIG_PARAVIRT
 /*
@@ -272,6 +260,13 @@ static __always_inline void __raw_spin_unlock(raw_spinlock_t *lock)
 {
 	__ticket_spin_unlock(lock);
 }
+
+static __always_inline void __raw_spin_lock_flags(raw_spinlock_t *lock,
+						  unsigned long flags)
+{
+	__raw_spin_lock(lock);
+}
+
 #endif /* CONFIG_PARAVIRT */
 
 static inline void __raw_spin_unlock_wait(raw_spinlock_t *lock)
@@ -366,4 +361,4 @@ static inline void __raw_write_unlock(raw_rwlock_t *rw)
 #define _raw_read_relax(lock)	cpu_relax()
 #define _raw_write_relax(lock)	cpu_relax()
 
-#endif
+#endif /* ASM_X86__SPINLOCK_H */
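
A note on the TICKET_SHIFT helpers this patch introduces: the lock word keeps the next ticket to hand out in its high half and the ticket currently being served in its low half, so the lock is held exactly when the two halves differ, and contended when they differ by more than one. The trylock rewrites exploit the same layout: the added leal computes the candidate lock word with the ticket already incremented, which is what cmpxchg then tries to install. Below is a minimal user-space sketch of the is_locked/is_contended checks, not kernel code; the function names and the main() harness are hypothetical, and it assumes the 8-bit-ticket layout (NR_CPUS < 256).

#include <stdio.h>

#define TICKET_SHIFT 8	/* 8-bit tickets, i.e. the NR_CPUS < 256 layout */

/* Hypothetical stand-in for lock->slock: next ticket in the high
 * byte, ticket currently being served in the low byte. */
static int ticket_is_locked(unsigned int slock)
{
	/* Held iff the next and owner tickets differ. */
	return !!(((slock >> TICKET_SHIFT) ^ slock) &
		  ((1 << TICKET_SHIFT) - 1));
}

static int ticket_is_contended(unsigned int slock)
{
	/* More than one ticket outstanding means waiters are queued. */
	return (((slock >> TICKET_SHIFT) - slock) &
		((1 << TICKET_SHIFT) - 1)) > 1;
}

int main(void)
{
	printf("%d %d\n", ticket_is_locked(0x0303), ticket_is_contended(0x0303)); /* 0 0: free */
	printf("%d %d\n", ticket_is_locked(0x0403), ticket_is_contended(0x0403)); /* 1 0: held, no waiters */
	printf("%d %d\n", ticket_is_locked(0x0503), ticket_is_contended(0x0503)); /* 1 1: held, contended */
	return 0;
}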