Diffstat (limited to 'include/asm-x86/bitops.h')
-rw-r--r--	include/asm-x86/bitops.h	316
1 file changed, 316 insertions, 0 deletions
diff --git a/include/asm-x86/bitops.h b/include/asm-x86/bitops.h
index 07e3f6d4fe47..1a23ce1a5697 100644
--- a/include/asm-x86/bitops.h
+++ b/include/asm-x86/bitops.h
@@ -1,5 +1,321 @@
#ifndef _ASM_X86_BITOPS_H
#define _ASM_X86_BITOPS_H

/*
 * Copyright 1992, Linus Torvalds.
 */

#ifndef _LINUX_BITOPS_H
#error only <linux/bitops.h> can be included directly
#endif

#include <linux/compiler.h>
#include <asm/alternative.h>

/*
 * These have to be done with inline assembly: that way the bit-setting
 * is guaranteed to be atomic. All bit operations return 0 if the bit
 * was cleared before the operation and != 0 if it was not.
 *
 * bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1).
 */

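/*
 * Worked example (illustrative, not part of the original patch): with
 * the numbering above, on a 32-bit kernel nr = 35 addresses long word
 * 35 / 32 = 1 and bit 35 % 32 = 3, so set_bit(35, addr) performs the
 * equivalent of ((volatile unsigned long *)addr)[1] |= 1UL << 3,
 * atomically.
 */
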
#if __GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ < 1)
/* Technically wrong, but this avoids compilation errors on some gcc
   versions. */
#define ADDR "=m" (*(volatile long *) addr)
#else
#define ADDR "+m" (*(volatile long *) addr)
#endif

/**
 * set_bit - Atomically set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * This function is atomic and may not be reordered. See __set_bit()
 * if you do not require the atomic guarantees.
 *
 * Note: there are no guarantees that this function will not be reordered
 * on non-x86 architectures, so if you are writing portable code,
 * make sure not to rely on its reordering guarantees.
 *
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static inline void set_bit(int nr, volatile void *addr)
{
	asm volatile(LOCK_PREFIX "bts %1,%0"
		     : ADDR
		     : "Ir" (nr) : "memory");
}

/**
 * __set_bit - Set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * Unlike set_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static inline void __set_bit(int nr, volatile void *addr)
{
	asm volatile("bts %1,%0"
		     : ADDR
		     : "Ir" (nr) : "memory");
}


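/*
 * Usage sketch (illustrative only, not from the original patch): the
 * bitmap `demo_mask` and the bit indices are hypothetical.  Use the
 * atomic form when other CPUs may touch the same word; the __ form is
 * cheaper when the caller already serializes access externally.
 *
 *	static unsigned long demo_mask[2];
 *
 *	set_bit(35, demo_mask);		// safe against concurrent updates
 *	__set_bit(36, demo_mask);	// caller must hold a lock
 */
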
/**
 * clear_bit - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit() is atomic and may not be reordered. However, it does
 * not contain a memory barrier, so if it is used for locking purposes,
 * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
 * in order to ensure changes are visible on other processors.
 */
static inline void clear_bit(int nr, volatile void *addr)
{
	asm volatile(LOCK_PREFIX "btr %1,%0"
		     : ADDR
		     : "Ir" (nr));
}

/**
 * clear_bit_unlock - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit_unlock() is atomic and implies release semantics before the
 * memory operation. It can be used for an unlock.
 */
static inline void clear_bit_unlock(unsigned nr, volatile void *addr)
{
	barrier();
	clear_bit(nr, addr);
}

static inline void __clear_bit(int nr, volatile void *addr)
{
	asm volatile("btr %1,%0" : ADDR : "Ir" (nr));
}

/**
 * __clear_bit_unlock - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * __clear_bit_unlock() is non-atomic and implies release semantics before
 * the memory operation. It can be used for an unlock if no other CPUs can
 * concurrently modify other bits in the word.
 *
 * No memory barrier is required here, because x86 cannot reorder stores past
 * older loads. Same principle as spin_unlock.
 */
static inline void __clear_bit_unlock(unsigned nr, volatile void *addr)
{
	barrier();
	__clear_bit(nr, addr);
}

#define smp_mb__before_clear_bit() barrier()
#define smp_mb__after_clear_bit() barrier()

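/*
 * Usage sketch (illustrative, not part of the original patch): pairing
 * clear_bit() with the barrier macro above when the bit gates other
 * state.  `state`, DEMO_PENDING and the preceding data write are all
 * hypothetical.
 *
 *	static unsigned long state;
 *	#define DEMO_PENDING	0
 *
 *	// make the published results visible before dropping the flag
 *	smp_mb__before_clear_bit();
 *	clear_bit(DEMO_PENDING, &state);
 */
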
/**
 * __change_bit - Toggle a bit in memory
 * @nr: the bit to change
 * @addr: the address to start counting from
 *
 * Unlike change_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static inline void __change_bit(int nr, volatile void *addr)
{
	asm volatile("btc %1,%0" : ADDR : "Ir" (nr));
}

/**
 * change_bit - Toggle a bit in memory
 * @nr: Bit to change
 * @addr: Address to start counting from
 *
 * change_bit() is atomic and may not be reordered.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static inline void change_bit(int nr, volatile void *addr)
{
	asm volatile(LOCK_PREFIX "btc %1,%0"
		     : ADDR : "Ir" (nr));
}

/**
 * test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_set_bit(int nr, volatile void *addr)
{
	int oldbit;

	asm volatile(LOCK_PREFIX "bts %2,%1\n\t"
		     "sbb %0,%0"
		     : "=r" (oldbit), ADDR
		     : "Ir" (nr) : "memory");

	return oldbit;
}

/**
 * test_and_set_bit_lock - Set a bit and return its old value for lock
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This is the same as test_and_set_bit on x86.
 */
static inline int test_and_set_bit_lock(int nr, volatile void *addr)
{
	return test_and_set_bit(nr, addr);
}

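/*
 * Usage sketch (illustrative, not from the original patch): a bit lock
 * built from test_and_set_bit_lock()/clear_bit_unlock().  `flags` and
 * DEMO_LOCKED are hypothetical names.
 *
 *	static unsigned long flags;
 *	#define DEMO_LOCKED	0
 *
 *	while (test_and_set_bit_lock(DEMO_LOCKED, &flags))
 *		cpu_relax();		// old value was 1: lock is held
 *	// ... critical section ...
 *	clear_bit_unlock(DEMO_LOCKED, &flags);
 */
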
/**
 * __test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two instances of this operation race, one can appear to succeed
 * but actually fail. You must protect multiple accesses with a lock.
 */
static inline int __test_and_set_bit(int nr, volatile void *addr)
{
	int oldbit;

	asm("bts %2,%1\n\t"
	    "sbb %0,%0"
	    : "=r" (oldbit), ADDR
	    : "Ir" (nr));
	return oldbit;
}

/**
 * test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_clear_bit(int nr, volatile void *addr)
{
	int oldbit;

	asm volatile(LOCK_PREFIX "btr %2,%1\n\t"
		     "sbb %0,%0"
		     : "=r" (oldbit), ADDR
		     : "Ir" (nr) : "memory");

	return oldbit;
}

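/*
 * Usage sketch (illustrative, not part of the original patch): atomically
 * consuming a pending-work flag so that exactly one CPU claims the work.
 * `pending` and DEMO_WORK are hypothetical.
 *
 *	static unsigned long pending;
 *	#define DEMO_WORK	0
 *
 *	if (test_and_clear_bit(DEMO_WORK, &pending)) {
 *		// old value was 1: this CPU owns the work item
 *	}
 */
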
/**
 * __test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two instances of this operation race, one can appear to succeed
 * but actually fail. You must protect multiple accesses with a lock.
 */
static inline int __test_and_clear_bit(int nr, volatile void *addr)
{
	int oldbit;

	asm volatile("btr %2,%1\n\t"
		     "sbb %0,%0"
		     : "=r" (oldbit), ADDR
		     : "Ir" (nr));
	return oldbit;
}

/* WARNING: non-atomic and it can be reordered! */
static inline int __test_and_change_bit(int nr, volatile void *addr)
{
	int oldbit;

	asm volatile("btc %2,%1\n\t"
		     "sbb %0,%0"
		     : "=r" (oldbit), ADDR
		     : "Ir" (nr) : "memory");

	return oldbit;
}

/**
 * test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_change_bit(int nr, volatile void *addr)
{
	int oldbit;

	asm volatile(LOCK_PREFIX "btc %2,%1\n\t"
		     "sbb %0,%0"
		     : "=r" (oldbit), ADDR
		     : "Ir" (nr) : "memory");

	return oldbit;
}

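/*
 * Usage sketch (illustrative, not from the original patch): atomically
 * toggling a mode bit while learning its previous state.  `mode` and
 * DEMO_FAST are hypothetical.
 *
 *	static unsigned long mode;
 *	#define DEMO_FAST	0
 *
 *	if (test_and_change_bit(DEMO_FAST, &mode))
 *		pr_debug("fast mode was on, now off\n");
 */
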
static inline int constant_test_bit(int nr, const volatile void *addr)
{
	return ((1UL << (nr % BITS_PER_LONG)) &
		(((unsigned long *)addr)[nr / BITS_PER_LONG])) != 0;
}

static inline int variable_test_bit(int nr, const volatile void *addr)
{
	int oldbit;

	asm volatile("bt %2,%1\n\t"
		     "sbb %0,%0"
		     : "=r" (oldbit)
		     : "m" (*(unsigned long *)addr), "Ir" (nr));

	return oldbit;
}

#if 0 /* Fool kernel-doc since it doesn't do macros yet */
/**
 * test_bit - Determine whether a bit is set
 * @nr: bit number to test
 * @addr: Address to start counting from
 */
static int test_bit(int nr, const volatile unsigned long *addr);
#endif

#define test_bit(nr,addr)			\
	(__builtin_constant_p(nr) ?		\
	 constant_test_bit((nr),(addr)) :	\
	 variable_test_bit((nr),(addr)))

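/*
 * Sketch of the dispatch above (illustrative, not part of the patch):
 * when @nr is a compile-time constant, __builtin_constant_p() lets gcc
 * fold the branch away, so only one helper is ever emitted per call site.
 *
 *	unsigned long map[2];
 *
 *	test_bit(5, map);	// constant nr -> constant_test_bit(), plain C
 *	test_bit(n, map);	// variable nr -> variable_test_bit(), BT insn
 */
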
#undef ADDR

#ifdef CONFIG_X86_32
# include "bitops_32.h"
#else
# include "bitops_64.h"
#endif

#endif /* _ASM_X86_BITOPS_H */