 include/asm-x86/spinlock.h    | 211
 include/asm-x86/spinlock_32.h | 208
 include/asm-x86/spinlock_64.h | 186
 3 files changed, 209 insertions(+), 396 deletions(-)
diff --git a/include/asm-x86/spinlock.h b/include/asm-x86/spinlock.h
index e1d555a3dfe..afd4b80ff0a 100644
--- a/include/asm-x86/spinlock.h
+++ b/include/asm-x86/spinlock.h
@@ -1,6 +1,22 @@
 #ifndef _X86_SPINLOCK_H_
 #define _X86_SPINLOCK_H_
 
+#include <asm/atomic.h>
+#include <asm/rwlock.h>
+#include <asm/page.h>
+#include <asm/processor.h>
+
+/*
+ * Your basic SMP spinlocks, allowing only a single CPU anywhere
+ *
+ * Simple spin lock operations. There are two variants, one clears IRQ's
+ * on the local processor, one does not.
+ *
+ * We make no fairness assumptions. They have a cost.
+ *
+ * (the type definitions are in asm/spinlock_types.h)
+ */
+
 #ifdef CONFIG_PARAVIRT
 #include <asm/paravirt.h>
 #else
@@ -11,9 +27,200 @@
 #endif /* CONFIG_PARAVIRT */
 
 #ifdef CONFIG_X86_32
-# include "spinlock_32.h"
+typedef char _slock_t;
+# define LOCK_INS_DEC "decb"
+# define LOCK_INS_XCH "xchgb"
+# define LOCK_INS_MOV "movb"
+# define LOCK_INS_CMP "cmpb"
+# define LOCK_PTR_REG "a"
 #else
-# include "spinlock_64.h"
+typedef int _slock_t;
+# define LOCK_INS_DEC "decl"
+# define LOCK_INS_XCH "xchgl"
+# define LOCK_INS_MOV "movl"
+# define LOCK_INS_CMP "cmpl"
+# define LOCK_PTR_REG "D"
+#endif
+
+static inline int __raw_spin_is_locked(raw_spinlock_t *lock)
+{
+	return *(volatile _slock_t *)(&(lock)->slock) <= 0;
+}
+
+static inline void __raw_spin_lock(raw_spinlock_t *lock)
+{
+	asm volatile(
+		"\n1:\t"
+		LOCK_PREFIX " ; " LOCK_INS_DEC " %0\n\t"
+		"jns 3f\n"
+		"2:\t"
+		"rep;nop\n\t"
+		LOCK_INS_CMP " $0,%0\n\t"
+		"jle 2b\n\t"
+		"jmp 1b\n"
+		"3:\n\t"
+		: "+m" (lock->slock) : : "memory");
+}
+
+/*
+ * It is easier for the lock validator if interrupts are not re-enabled
+ * in the middle of a lock-acquire. This is a performance feature anyway
+ * so we turn it off:
+ *
+ * NOTE: there's an irqs-on section here, which normally would have to be
+ * irq-traced, but on CONFIG_TRACE_IRQFLAGS we never use this variant.
+ */
+#ifndef CONFIG_PROVE_LOCKING
+static inline void __raw_spin_lock_flags(raw_spinlock_t *lock,
+					 unsigned long flags)
+{
+	asm volatile(
+		"\n1:\t"
+		LOCK_PREFIX " ; " LOCK_INS_DEC " %[slock]\n\t"
+		"jns 5f\n"
+		"testl $0x200, %[flags]\n\t"
+		"jz 4f\n\t"
+		STI_STRING "\n"
+		"3:\t"
+		"rep;nop\n\t"
+		LOCK_INS_CMP " $0, %[slock]\n\t"
+		"jle 3b\n\t"
+		CLI_STRING "\n\t"
+		"jmp 1b\n"
+		"4:\t"
+		"rep;nop\n\t"
+		LOCK_INS_CMP " $0, %[slock]\n\t"
+		"jg 1b\n\t"
+		"jmp 4b\n"
+		"5:\n\t"
+		: [slock] "+m" (lock->slock)
+		: [flags] "r" ((u32)flags)
+		  CLI_STI_INPUT_ARGS
+		: "memory" CLI_STI_CLOBBERS);
+}
+#endif
+
+static inline int __raw_spin_trylock(raw_spinlock_t *lock)
+{
+	_slock_t oldval;
+
+	asm volatile(
+		LOCK_INS_XCH " %0,%1"
+		:"=q" (oldval), "+m" (lock->slock)
+		:"0" (0) : "memory");
+
+	return oldval > 0;
+}
+
+/*
+ * __raw_spin_unlock based on writing $1 to the low byte.
+ * This method works. Despite all the confusion.
+ * (except on PPro SMP or if we are using OOSTORE, so we use xchgb there)
+ * (PPro errata 66, 92)
+ */
+#if defined(X86_64) || \
+	(!defined(CONFIG_X86_OOSTORE) && !defined(CONFIG_X86_PPRO_FENCE))
+
+static inline void __raw_spin_unlock(raw_spinlock_t *lock)
+{
+	asm volatile(LOCK_INS_MOV " $1,%0" : "=m" (lock->slock) :: "memory");
+}
+
+#else
+
+static inline void __raw_spin_unlock(raw_spinlock_t *lock)
+{
+	unsigned char oldval = 1;
+
+	asm volatile("xchgb %b0, %1"
+		     : "=q" (oldval), "+m" (lock->slock)
+		     : "0" (oldval) : "memory");
+}
+
 #endif
 
+static inline void __raw_spin_unlock_wait(raw_spinlock_t *lock)
+{
+	while (__raw_spin_is_locked(lock))
+		cpu_relax();
+}
+
+/*
+ * Read-write spinlocks, allowing multiple readers
+ * but only one writer.
+ *
+ * NOTE! it is quite common to have readers in interrupts
+ * but no interrupt writers. For those circumstances we
+ * can "mix" irq-safe locks - any writer needs to get a
+ * irq-safe write-lock, but readers can get non-irqsafe
+ * read-locks.
+ *
+ * On x86, we implement read-write locks as a 32-bit counter
+ * with the high bit (sign) being the "contended" bit.
+ */
+
+static inline int __raw_read_can_lock(raw_rwlock_t *lock)
+{
+	return (int)(lock)->lock > 0;
+}
+
+static inline int __raw_write_can_lock(raw_rwlock_t *lock)
+{
+	return (lock)->lock == RW_LOCK_BIAS;
+}
+
+static inline void __raw_read_lock(raw_rwlock_t *rw)
+{
+	asm volatile(LOCK_PREFIX " subl $1,(%0)\n\t"
+		     "jns 1f\n"
+		     "call __read_lock_failed\n\t"
+		     "1:\n"
+		     ::LOCK_PTR_REG (rw) : "memory");
+}
+
+static inline void __raw_write_lock(raw_rwlock_t *rw)
+{
+	asm volatile(LOCK_PREFIX " subl %1,(%0)\n\t"
+		     "jz 1f\n"
+		     "call __write_lock_failed\n\t"
+		     "1:\n"
+		     ::LOCK_PTR_REG (rw), "i" (RW_LOCK_BIAS) : "memory");
+}
+
+static inline int __raw_read_trylock(raw_rwlock_t *lock)
+{
+	atomic_t *count = (atomic_t *)lock;
+
+	atomic_dec(count);
+	if (atomic_read(count) >= 0)
+		return 1;
+	atomic_inc(count);
+	return 0;
+}
+
+static inline int __raw_write_trylock(raw_rwlock_t *lock)
+{
+	atomic_t *count = (atomic_t *)lock;
+
+	if (atomic_sub_and_test(RW_LOCK_BIAS, count))
+		return 1;
+	atomic_add(RW_LOCK_BIAS, count);
+	return 0;
+}
+
+static inline void __raw_read_unlock(raw_rwlock_t *rw)
+{
+	asm volatile(LOCK_PREFIX "incl %0" :"+m" (rw->lock) : : "memory");
+}
+
+static inline void __raw_write_unlock(raw_rwlock_t *rw)
+{
+	asm volatile(LOCK_PREFIX "addl %1, %0"
+		     : "+m" (rw->lock) : "i" (RW_LOCK_BIAS) : "memory");
+}
+
+#define _raw_spin_relax(lock)	cpu_relax()
+#define _raw_read_relax(lock)	cpu_relax()
+#define _raw_write_relax(lock)	cpu_relax()
+
 #endif
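
Illustration (not part of the patch): the inline assembly above encodes a simple protocol — slock starts at 1, a locked decrement whose result stays non-negative wins the lock, and a plain store of 1 releases it. Below is a minimal user-space sketch of that protocol using C11 atomics instead of the kernel's asm; the demo_* names, <stdatomic.h> and sched_yield() are stand-ins chosen for this illustration only.

#include <sched.h>		/* sched_yield(): crude stand-in for rep;nop / cpu_relax() */
#include <stdatomic.h>

typedef struct {
	atomic_int slock;	/* > 0: unlocked, <= 0: locked or contended */
} demo_spinlock_t;

static void demo_spin_lock(demo_spinlock_t *lock)
{
	for (;;) {
		/* LOCK_INS_DEC + jns: an old value > 0 means our decrement won */
		if (atomic_fetch_sub(&lock->slock, 1) > 0)
			return;
		/* contended: spin read-only until it looks free, then retry */
		while (atomic_load(&lock->slock) <= 0)
			sched_yield();
	}
}

static int demo_spin_trylock(demo_spinlock_t *lock)
{
	/* LOCK_INS_XCH: swap in 0; a positive old value means it was free */
	return atomic_exchange(&lock->slock, 0) > 0;
}

static void demo_spin_unlock(demo_spinlock_t *lock)
{
	/* LOCK_INS_MOV $1: releasing is a single store of 1 */
	atomic_store(&lock->slock, 1);
}

A lock starts out unlocked by initialising slock to 1 (e.g. ATOMIC_VAR_INIT(1)). The unlock-as-plain-store path corresponds to the movb/movl case; the xchgb fallback in the patch exists only for the PPro/OOSTORE cases called out in the comment.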
diff --git a/include/asm-x86/spinlock_32.h b/include/asm-x86/spinlock_32.h
deleted file mode 100644
index 2de9b8b8990..00000000000
--- a/include/asm-x86/spinlock_32.h
+++ /dev/null
@@ -1,208 +0,0 @@
-#ifndef __ASM_SPINLOCK_H
-#define __ASM_SPINLOCK_H
-
-#include <asm/atomic.h>
-#include <asm/rwlock.h>
-#include <asm/page.h>
-#include <asm/processor.h>
-
-/*
- * Your basic SMP spinlocks, allowing only a single CPU anywhere
- *
- * Simple spin lock operations. There are two variants, one clears IRQ's
- * on the local processor, one does not.
- *
- * We make no fairness assumptions. They have a cost.
- *
- * (the type definitions are in asm/spinlock_types.h)
- */
-
-typedef char _slock_t;
-#define LOCK_INS_DEC "decb"
-#define LOCK_INS_XCH "xchgb"
-#define LOCK_INS_MOV "movb"
-#define LOCK_INS_CMP "cmpb"
-#define LOCK_PTR_REG "a"
-
-static inline int __raw_spin_is_locked(raw_spinlock_t *lock)
-{
-	return *(volatile _slock_t *)(&(lock)->slock) <= 0;
-}
-
-static inline void __raw_spin_lock(raw_spinlock_t *lock)
-{
-	asm volatile(
-		"\n1:\t"
-		LOCK_PREFIX " ; " LOCK_INS_DEC " %0\n\t"
-		"jns 3f\n"
-		"2:\t"
-		"rep;nop\n\t"
-		LOCK_INS_CMP " $0,%0\n\t"
-		"jle 2b\n\t"
-		"jmp 1b\n"
-		"3:\n\t"
-		: "+m" (lock->slock) : : "memory");
-}
-
-/*
- * It is easier for the lock validator if interrupts are not re-enabled
- * in the middle of a lock-acquire. This is a performance feature anyway
- * so we turn it off:
- *
- * NOTE: there's an irqs-on section here, which normally would have to be
- * irq-traced, but on CONFIG_TRACE_IRQFLAGS we never use this variant.
- */
-#ifndef CONFIG_PROVE_LOCKING
-static inline void __raw_spin_lock_flags(raw_spinlock_t *lock,
-					 unsigned long flags)
-{
-	asm volatile(
-		"\n1:\t"
-		LOCK_PREFIX " ; " LOCK_INS_DEC " %[slock]\n\t"
-		"jns 5f\n"
-		"testl $0x200, %[flags]\n\t"
-		"jz 4f\n\t"
-		STI_STRING "\n"
-		"3:\t"
-		"rep;nop\n\t"
-		LOCK_INS_CMP " $0, %[slock]\n\t"
-		"jle 3b\n\t"
-		CLI_STRING "\n\t"
-		"jmp 1b\n"
-		"4:\t"
-		"rep;nop\n\t"
-		LOCK_INS_CMP " $0, %[slock]\n\t"
-		"jg 1b\n\t"
-		"jmp 4b\n"
-		"5:\n\t"
-		: [slock] "+m" (lock->slock)
-		: [flags] "r" ((u32)flags)
-		  CLI_STI_INPUT_ARGS
-		: "memory" CLI_STI_CLOBBERS);
-}
-#endif
-
-static inline int __raw_spin_trylock(raw_spinlock_t *lock)
-{
-	_slock_t oldval;
-
-	asm volatile(
-		LOCK_INS_XCH " %0,%1"
-		:"=q" (oldval), "+m" (lock->slock)
-		:"0" (0) : "memory");
-
-	return oldval > 0;
-}
-
-/*
- * __raw_spin_unlock based on writing $1 to the low byte.
- * This method works. Despite all the confusion.
- * (except on PPro SMP or if we are using OOSTORE, so we use xchgb there)
- * (PPro errata 66, 92)
- */
-
-#if !defined(CONFIG_X86_OOSTORE) && !defined(CONFIG_X86_PPRO_FENCE)
-
-static inline void __raw_spin_unlock(raw_spinlock_t *lock)
-{
-	asm volatile(LOCK_INS_MOV " $1,%0" : "=m" (lock->slock) :: "memory");
-}
-
-#else
-
-static inline void __raw_spin_unlock(raw_spinlock_t *lock)
-{
-	unsigned char oldval = 1;
-
-	asm volatile("xchgb %b0, %1"
-		     : "=q" (oldval), "+m" (lock->slock)
-		     : "0" (oldval) : "memory");
-}
-
-#endif
-
-static inline void __raw_spin_unlock_wait(raw_spinlock_t *lock)
-{
-	while (__raw_spin_is_locked(lock))
-		cpu_relax();
-}
-
-/*
- * Read-write spinlocks, allowing multiple readers
- * but only one writer.
- *
- * NOTE! it is quite common to have readers in interrupts
- * but no interrupt writers. For those circumstances we
- * can "mix" irq-safe locks - any writer needs to get a
- * irq-safe write-lock, but readers can get non-irqsafe
- * read-locks.
- *
- * On x86, we implement read-write locks as a 32-bit counter
- * with the high bit (sign) being the "contended" bit.
- */
-
-static inline int __raw_read_can_lock(raw_rwlock_t *lock)
-{
-	return (int)(lock)->lock > 0;
-}
-
-static inline int __raw_write_can_lock(raw_rwlock_t *lock)
-{
-	return (lock)->lock == RW_LOCK_BIAS;
-}
-
-static inline void __raw_read_lock(raw_rwlock_t *rw)
-{
-	asm volatile(LOCK_PREFIX " subl $1,(%0)\n\t"
-		     "jns 1f\n"
-		     "call __read_lock_failed\n\t"
-		     "1:\n"
-		     ::LOCK_PTR_REG (rw) : "memory");
-}
-
-static inline void __raw_write_lock(raw_rwlock_t *rw)
-{
-	asm volatile(LOCK_PREFIX " subl %1,(%0)\n\t"
-		     "jz 1f\n"
-		     "call __write_lock_failed\n\t"
-		     "1:\n"
-		     ::LOCK_PTR_REG (rw), "i" (RW_LOCK_BIAS) : "memory");
-}
-
-static inline int __raw_read_trylock(raw_rwlock_t *lock)
-{
-	atomic_t *count = (atomic_t *)lock;
-
-	atomic_dec(count);
-	if (atomic_read(count) >= 0)
-		return 1;
-	atomic_inc(count);
-	return 0;
-}
-
-static inline int __raw_write_trylock(raw_rwlock_t *lock)
-{
-	atomic_t *count = (atomic_t *)lock;
-
-	if (atomic_sub_and_test(RW_LOCK_BIAS, count))
-		return 1;
-	atomic_add(RW_LOCK_BIAS, count);
-	return 0;
-}
-
-static inline void __raw_read_unlock(raw_rwlock_t *rw)
-{
-	asm volatile(LOCK_PREFIX "incl %0" :"+m" (rw->lock) : : "memory");
-}
-
-static inline void __raw_write_unlock(raw_rwlock_t *rw)
-{
-	asm volatile(LOCK_PREFIX "addl %1, %0"
-		     : "+m" (rw->lock) : "i" (RW_LOCK_BIAS) : "memory");
-}
-
-#define _raw_spin_relax(lock)	cpu_relax()
-#define _raw_read_relax(lock)	cpu_relax()
-#define _raw_write_relax(lock)	cpu_relax()
-
-#endif /* __ASM_SPINLOCK_H */
diff --git a/include/asm-x86/spinlock_64.h b/include/asm-x86/spinlock_64.h
deleted file mode 100644
index f5ba90b7335..00000000000
--- a/include/asm-x86/spinlock_64.h
+++ /dev/null
@@ -1,186 +0,0 @@
-#ifndef __ASM_SPINLOCK_H
-#define __ASM_SPINLOCK_H
-
-#include <asm/atomic.h>
-#include <asm/rwlock.h>
-#include <asm/page.h>
-#include <asm/processor.h>
-
-/*
- * Your basic SMP spinlocks, allowing only a single CPU anywhere
- *
- * Simple spin lock operations. There are two variants, one clears IRQ's
- * on the local processor, one does not.
- *
- * We make no fairness assumptions. They have a cost.
- *
- * (the type definitions are in asm/spinlock_types.h)
- */
-
-typedef int _slock_t;
-#define LOCK_INS_DEC "decl"
-#define LOCK_INS_XCH "xchgl"
-#define LOCK_INS_MOV "movl"
-#define LOCK_INS_CMP "cmpl"
-#define LOCK_PTR_REG "D"
-
-static inline int __raw_spin_is_locked(raw_spinlock_t *lock)
-{
-	return *(volatile _slock_t *)(&(lock)->slock) <= 0;
-}
-
-static inline void __raw_spin_lock(raw_spinlock_t *lock)
-{
-	asm volatile(
-		"\n1:\t"
-		LOCK_PREFIX " ; " LOCK_INS_DEC " %0\n\t"
-		"jns 3f\n"
-		"2:\t"
-		"rep;nop\n\t"
-		LOCK_INS_CMP " $0,%0\n\t"
-		"jle 2b\n\t"
-		"jmp 1b\n"
-		"3:\n\t"
-		: "+m" (lock->slock) : : "memory");
-}
-
-/*
- * It is easier for the lock validator if interrupts are not re-enabled
- * in the middle of a lock-acquire. This is a performance feature anyway
- * so we turn it off:
- *
- * NOTE: there's an irqs-on section here, which normally would have to be
- * irq-traced, but on CONFIG_TRACE_IRQFLAGS we never use this variant.
- */
-#ifndef CONFIG_PROVE_LOCKING
-static inline void __raw_spin_lock_flags(raw_spinlock_t *lock,
-					 unsigned long flags)
-{
-	asm volatile(
-		"\n1:\t"
-		LOCK_PREFIX " ; " LOCK_INS_DEC " %[slock]\n\t"
-		"jns 5f\n"
-		"testl $0x200, %[flags]\n\t"
-		"jz 4f\n\t"
-		STI_STRING "\n"
-		"3:\t"
-		"rep;nop\n\t"
-		LOCK_INS_CMP " $0, %[slock]\n\t"
-		"jle 3b\n\t"
-		CLI_STRING "\n\t"
-		"jmp 1b\n"
-		"4:\t"
-		"rep;nop\n\t"
-		LOCK_INS_CMP " $0, %[slock]\n\t"
-		"jg 1b\n\t"
-		"jmp 4b\n"
-		"5:\n\t"
-		: [slock] "+m" (lock->slock)
-		: [flags] "r" ((u32)flags)
-		  CLI_STI_INPUT_ARGS
-		: "memory" CLI_STI_CLOBBERS);
-}
-#endif
-
-static inline int __raw_spin_trylock(raw_spinlock_t *lock)
-{
-	_slock_t oldval;
-
-	asm volatile(
-		LOCK_INS_XCH " %0,%1"
-		:"=q" (oldval), "+m" (lock->slock)
-		:"0" (0) : "memory");
-
-	return oldval > 0;
-}
-
-static inline void __raw_spin_unlock(raw_spinlock_t *lock)
-{
-	asm volatile(LOCK_INS_MOV " $1,%0" : "=m" (lock->slock) :: "memory");
-}
-
-static inline void __raw_spin_unlock_wait(raw_spinlock_t *lock)
-{
-	while (__raw_spin_is_locked(lock))
-		cpu_relax();
-}
-
-/*
- * Read-write spinlocks, allowing multiple readers
- * but only one writer.
- *
- * NOTE! it is quite common to have readers in interrupts
- * but no interrupt writers. For those circumstances we
- * can "mix" irq-safe locks - any writer needs to get a
- * irq-safe write-lock, but readers can get non-irqsafe
- * read-locks.
- *
- * On x86, we implement read-write locks as a 32-bit counter
- * with the high bit (sign) being the "contended" bit.
- */
-
-static inline int __raw_read_can_lock(raw_rwlock_t *lock)
-{
-	return (int)(lock)->lock > 0;
-}
-
-static inline int __raw_write_can_lock(raw_rwlock_t *lock)
-{
-	return (lock)->lock == RW_LOCK_BIAS;
-}
-
-static inline void __raw_read_lock(raw_rwlock_t *rw)
-{
-	asm volatile(LOCK_PREFIX " subl $1,(%0)\n\t"
-		     "jns 1f\n"
-		     "call __read_lock_failed\n\t"
-		     "1:\n"
-		     ::LOCK_PTR_REG (rw) : "memory");
-}
-
-static inline void __raw_write_lock(raw_rwlock_t *rw)
-{
-	asm volatile(LOCK_PREFIX " subl %1,(%0)\n\t"
-		     "jz 1f\n"
-		     "call __write_lock_failed\n\t"
-		     "1:\n"
-		     ::LOCK_PTR_REG (rw), "i" (RW_LOCK_BIAS) : "memory");
-}
-
-static inline int __raw_read_trylock(raw_rwlock_t *lock)
-{
-	atomic_t *count = (atomic_t *)lock;
-
-	atomic_dec(count);
-	if (atomic_read(count) >= 0)
-		return 1;
-	atomic_inc(count);
-	return 0;
-}
-
-static inline int __raw_write_trylock(raw_rwlock_t *lock)
-{
-	atomic_t *count = (atomic_t *)lock;
-
-	if (atomic_sub_and_test(RW_LOCK_BIAS, count))
-		return 1;
-	atomic_add(RW_LOCK_BIAS, count);
-	return 0;
-}
-
-static inline void __raw_read_unlock(raw_rwlock_t *rw)
-{
-	asm volatile(LOCK_PREFIX "incl %0" :"+m" (rw->lock) : : "memory");
-}
-
-static inline void __raw_write_unlock(raw_rwlock_t *rw)
-{
-	asm volatile(LOCK_PREFIX "addl %1, %0"
-		     : "+m" (rw->lock) : "i" (RW_LOCK_BIAS) : "memory");
-}
-
-#define _raw_spin_relax(lock)	cpu_relax()
-#define _raw_read_relax(lock)	cpu_relax()
-#define _raw_write_relax(lock)	cpu_relax()
-
-#endif /* __ASM_SPINLOCK_H */
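
Illustration (not part of the patch): the rwlock comment describes a 32-bit counter whose sign bit marks contention — each reader takes one unit, a writer takes the whole RW_LOCK_BIAS. The sketch below shows that arithmetic with C11 atomics; the bias value 0x01000000 is an assumption about what asm/rwlock.h defined at the time, the demo_* names are invented for the example, and the single fetch-and-subtract is a simplification of the separate atomic_dec/atomic_read pair used by the kernel's trylock paths above.

#include <stdatomic.h>

#define DEMO_RW_LOCK_BIAS 0x01000000	/* assumed value of RW_LOCK_BIAS */

/*
 * counter == BIAS          : unlocked
 * 0 < counter < BIAS       : (BIAS - counter) readers hold the lock
 * counter == 0             : one writer holds the lock
 * counter < 0 (sign set)   : contended - a reader or writer decrement failed
 */
static int demo_read_trylock(atomic_int *count)
{
	/* a reader takes one unit; an old value > 0 means no writer was in */
	if (atomic_fetch_sub(count, 1) > 0)
		return 1;
	atomic_fetch_add(count, 1);	/* back out: a writer holds the lock */
	return 0;
}

static int demo_write_trylock(atomic_int *count)
{
	/* a writer needs the whole bias: no readers and no other writer */
	if (atomic_fetch_sub(count, DEMO_RW_LOCK_BIAS) == DEMO_RW_LOCK_BIAS)
		return 1;
	atomic_fetch_add(count, DEMO_RW_LOCK_BIAS);	/* back out */
	return 0;
}

In the kernel's locking (as opposed to trylock) paths, the contended cases do not return failure; they branch to the out-of-line __read_lock_failed/__write_lock_failed helpers, which undo the subtraction and spin until the lock can be retried.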