author		Thomas Gleixner <tglx@linutronix.de>	2008-01-30 07:30:34 -0500
committer	Ingo Molnar <mingo@elte.hu>	2008-01-30 07:30:34 -0500
commit		1075cf7a959f72833e54dd2d4f885617e58e3e0a (patch)
tree		efda7563280a3b8a0c8856e2ac39c3c8578b1e11 /include/asm-x86/spinlock_32.h
parent		cf244e30f5b50763cbe85f7de30923d12999e38d (diff)

x86: merge spinlock.h variants

Merge them finally together.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'include/asm-x86/spinlock_32.h')
-rw-r--r--	include/asm-x86/spinlock_32.h	208
1 file changed, 0 insertions, 208 deletions
diff --git a/include/asm-x86/spinlock_32.h b/include/asm-x86/spinlock_32.h
deleted file mode 100644
index 2de9b8b89903..000000000000
--- a/include/asm-x86/spinlock_32.h
+++ /dev/null
@@ -1,208 +0,0 @@
#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H

#include <asm/atomic.h>
#include <asm/rwlock.h>
#include <asm/page.h>
#include <asm/processor.h>

/*
 * Your basic SMP spinlocks, allowing only a single CPU anywhere
 *
 * Simple spin lock operations. There are two variants: one clears IRQs
 * on the local processor, the other does not.
 *
 * We make no fairness assumptions; fairness would have a cost.
 *
 * (the type definitions are in asm/spinlock_types.h)
 */

typedef char _slock_t;
#define LOCK_INS_DEC	"decb"
#define LOCK_INS_XCH	"xchgb"
#define LOCK_INS_MOV	"movb"
#define LOCK_INS_CMP	"cmpb"
#define LOCK_PTR_REG	"a"

static inline int __raw_spin_is_locked(raw_spinlock_t *lock)
{
	return *(volatile _slock_t *)(&(lock)->slock) <= 0;
}
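
/*
 * A rough sketch of the slock encoding the test above relies on: slock
 * starts at 1 when unlocked; "lock; decb" takes it to 0 for the owner
 * and negative for further contenders, so any value <= 0 means locked:
 *
 *	slock == 1   ->  unlocked
 *	slock == 0   ->  locked, uncontended
 *	slock <  0   ->  locked, with waiters spinning
 */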

static inline void __raw_spin_lock(raw_spinlock_t *lock)
{
	asm volatile(
		"\n1:\t"
		LOCK_PREFIX " ; " LOCK_INS_DEC " %0\n\t"
		"jns 3f\n"
		"2:\t"
		"rep;nop\n\t"
		LOCK_INS_CMP " $0,%0\n\t"
		"jle 2b\n\t"
		"jmp 1b\n"
		"3:\n\t"
		: "+m" (lock->slock) : : "memory");
}
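
/*
 * Roughly equivalent C logic for the asm loop above, as an illustrative
 * sketch only -- the real code depends on "lock; decb" being a single
 * atomic read-modify-write, which plain C cannot express:
 *
 *	for (;;) {
 *		if (--lock->slock >= 0)		// atomic via lock;decb
 *			return;			// "jns 3f": we own it
 *		while (lock->slock <= 0)	// spin read-only...
 *			cpu_relax();		// ...with rep;nop
 *	}
 */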

/*
 * It is easier for the lock validator if interrupts are not re-enabled
 * in the middle of a lock-acquire. This is a performance feature anyway
 * so we turn it off:
 *
 * NOTE: there's an irqs-on section here, which normally would have to be
 * irq-traced, but on CONFIG_TRACE_IRQFLAGS we never use this variant.
 */
#ifndef CONFIG_PROVE_LOCKING
static inline void __raw_spin_lock_flags(raw_spinlock_t *lock,
					 unsigned long flags)
{
	asm volatile(
		"\n1:\t"
		LOCK_PREFIX " ; " LOCK_INS_DEC " %[slock]\n\t"
		"jns 5f\n"
		"testl $0x200, %[flags]\n\t"
		"jz 4f\n\t"
		STI_STRING "\n"
		"3:\t"
		"rep;nop\n\t"
		LOCK_INS_CMP " $0, %[slock]\n\t"
		"jle 3b\n\t"
		CLI_STRING "\n\t"
		"jmp 1b\n"
		"4:\t"
		"rep;nop\n\t"
		LOCK_INS_CMP " $0, %[slock]\n\t"
		"jg 1b\n\t"
		"jmp 4b\n"
		"5:\n\t"
		: [slock] "+m" (lock->slock)
		: [flags] "r" ((u32)flags)
		  CLI_STI_INPUT_ARGS
		: "memory" CLI_STI_CLOBBERS);
}
#endif
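
/*
 * What the flags variant above does, sketched in pseudo-C (illustrative
 * only): bit 9 (0x200) of the saved EFLAGS is the interrupt-enable flag
 * IF, so the slow path re-enables interrupts while spinning only if the
 * caller had them enabled, and disables them again before retrying:
 *
 *	while (--lock->slock < 0) {		// lock;decb failed
 *		if (flags & 0x200)		// IF was set: sti...
 *			local_irq_enable();
 *		while (lock->slock <= 0)
 *			cpu_relax();
 *		if (flags & 0x200)		// ...cli before retry
 *			local_irq_disable();
 *	}
 */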

static inline int __raw_spin_trylock(raw_spinlock_t *lock)
{
	_slock_t oldval;

	asm volatile(
		LOCK_INS_XCH " %0,%1"
		: "=q" (oldval), "+m" (lock->slock)
		: "0" (0) : "memory");

	return oldval > 0;
}
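
/*
 * The trylock works by atomically exchanging 0 into the lock byte:
 * xchg with a memory operand always asserts the bus lock on x86, so
 * the swap is atomic even without a lock prefix. If the old value was
 * positive (1 == unlocked) we now own the lock; if it was <= 0 someone
 * else holds it, and writing 0 leaves it no less locked than before.
 */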

/*
 * __raw_spin_unlock is based on writing $1 to the low byte.
 * This method works despite all the confusion, except on PPro SMP
 * (errata 66, 92) or when CONFIG_X86_OOSTORE is in use, in which
 * case we use xchgb instead.
 */

#if !defined(CONFIG_X86_OOSTORE) && !defined(CONFIG_X86_PPRO_FENCE)

static inline void __raw_spin_unlock(raw_spinlock_t *lock)
{
	asm volatile(LOCK_INS_MOV " $1,%0" : "=m" (lock->slock) :: "memory");
}

#else

static inline void __raw_spin_unlock(raw_spinlock_t *lock)
{
	unsigned char oldval = 1;

	asm volatile("xchgb %b0, %1"
		     : "=q" (oldval), "+m" (lock->slock)
		     : "0" (oldval) : "memory");
}

#endif

static inline void __raw_spin_unlock_wait(raw_spinlock_t *lock)
{
	while (__raw_spin_is_locked(lock))
		cpu_relax();
}

/*
 * Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * NOTE! It is quite common to have readers in interrupts
 * but no interrupt writers. For those circumstances we
 * can "mix" irq-safe locks - any writer needs to get an
 * irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 *
 * On x86, we implement read-write locks as a 32-bit counter
 * with the high bit (sign) being the "contended" bit.
 */
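
/*
 * A worked example of the counter arithmetic, assuming RW_LOCK_BIAS is
 * 0x01000000 (its value in asm/rwlock.h at the time): each reader
 * subtracts 1, a writer subtracts the whole bias.
 *
 *	lock == 0x01000000	completely free
 *	lock == 0x00fffffe	two readers hold it
 *	lock == 0x00000000	one writer holds it
 *	lock  < 0		writer contending with readers; the
 *				sign bit doubles as the "contended" flag
 */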

static inline int __raw_read_can_lock(raw_rwlock_t *lock)
{
	return (int)(lock)->lock > 0;
}

static inline int __raw_write_can_lock(raw_rwlock_t *lock)
{
	return (lock)->lock == RW_LOCK_BIAS;
}

static inline void __raw_read_lock(raw_rwlock_t *rw)
{
	asm volatile(LOCK_PREFIX " subl $1,(%0)\n\t"
		     "jns 1f\n"
		     "call __read_lock_failed\n\t"
		     "1:\n"
		     ::LOCK_PTR_REG (rw) : "memory");
}
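
/*
 * Rough shape of the fast/slow path split above (illustrative): the
 * common case is one atomic "subl $1"; only if the result went negative
 * (a writer holds or wants the lock) do we branch to the out-of-line
 * __read_lock_failed helper, which backs the count off and spins until
 * it can retry.
 */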

static inline void __raw_write_lock(raw_rwlock_t *rw)
{
	asm volatile(LOCK_PREFIX " subl %1,(%0)\n\t"
		     "jz 1f\n"
		     "call __write_lock_failed\n\t"
		     "1:\n"
		     ::LOCK_PTR_REG (rw), "i" (RW_LOCK_BIAS) : "memory");
}

static inline int __raw_read_trylock(raw_rwlock_t *lock)
{
	atomic_t *count = (atomic_t *)lock;

	atomic_dec(count);
	if (atomic_read(count) >= 0)
		return 1;
	atomic_inc(count);
	return 0;
}

static inline int __raw_write_trylock(raw_rwlock_t *lock)
{
	atomic_t *count = (atomic_t *)lock;

	if (atomic_sub_and_test(RW_LOCK_BIAS, count))
		return 1;
	atomic_add(RW_LOCK_BIAS, count);
	return 0;
}
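
/*
 * Why the write trylock above is correct: atomic_sub_and_test()
 * atomically subtracts RW_LOCK_BIAS and returns true only when the
 * result is exactly zero, i.e. when no reader or writer held the lock.
 * On failure the bias is added back, restoring the old reader count.
 */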

static inline void __raw_read_unlock(raw_rwlock_t *rw)
{
	asm volatile(LOCK_PREFIX "incl %0" :"+m" (rw->lock) : : "memory");
}

static inline void __raw_write_unlock(raw_rwlock_t *rw)
{
	asm volatile(LOCK_PREFIX "addl %1, %0"
		     : "+m" (rw->lock) : "i" (RW_LOCK_BIAS) : "memory");
}

#define _raw_spin_relax(lock)	cpu_relax()
#define _raw_read_relax(lock)	cpu_relax()
#define _raw_write_relax(lock)	cpu_relax()

#endif /* __ASM_SPINLOCK_H */