author		Jeff Dike <jdike@addtoit.com>	2007-05-08 03:35:02 -0400
committer	Linus Torvalds <torvalds@woody.linux-foundation.org>	2007-05-08 14:15:20 -0400
commit		a436ed9c5106b41606cbb55ab3b28389fe8ae04f (patch)
tree		b8df0bde6d7eb2808c37da815d8857396ee1eaf1 /include/asm-x86_64
parent		5dc12ddee93d63d7107cbbf70db23476d7b30e43 (diff)
x86: create asm/cmpxchg.h
i386:
Rearrange the cmpxchg code to allow atomic.h to get it without needing to
include system.h. This kills warnings in the UML build from atomic.h about
implicit declarations of cmpxchg symbols. The i386 build presumably isn't
seeing this because a separate inclusion of system.h is covering it over.
The cmpxchg stuff is moved to asm-i386/cmpxchg.h, with an include left in
system.h for the benefit of generic code which expects cmpxchg there.
Meanwhile, atomic.h includes cmpxchg.h.
This causes no noticeable damage to the i386 build.
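For illustration, the arrangement described above amounts to the following
include structure (a sketch only; asm-i386/cmpxchg.h is the new header from
this series, and the comments are not literal file contents):

	/* asm-i386/cmpxchg.h -- xchg()/cmpxchg() definitions now live here */

	/* asm-i386/system.h -- keeps an include so generic code still finds cmpxchg */
	#include <asm/cmpxchg.h>

	/* asm-i386/atomic.h -- gets cmpxchg without pulling in system.h */
	#include <asm/cmpxchg.h>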
x86_64:
Move cmpxchg into its own header. atomic.h already included system.h, so
this is changed to include cmpxchg.h.
This is purely cleanup - it's not fixing any warnings - so if the x86_64
system.h isn't considered as cleanup-worthy as i386, then this can be
dropped.
It causes no noticeable damage to the x86_64 build.
uml:
The i386 and x86_64 cmpxchg patches require an asm-um/cmpxchg.h for the
UML build.
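A minimal asm-um/cmpxchg.h would simply forward to the underlying
architecture's header, in the usual UML style (a sketch; the exact include
path is an assumption based on UML's asm-um conventions, since that file is
outside this diffstat):

	#ifndef __UM_CMPXCHG_H
	#define __UM_CMPXCHG_H

	/* Assumed path: UML reaches the host arch headers via asm/arch/ */
	#include "asm/arch/cmpxchg.h"

	#endif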
Signed-off-by: Jeff Dike <jdike@linux.intel.com>
Cc: Paolo 'Blaisorblade' Giarrusso <blaisorblade@yahoo.it>
Cc: Andi Kleen <ak@suse.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'include/asm-x86_64')
-rw-r--r--	include/asm-x86_64/atomic.h	  2
-rw-r--r--	include/asm-x86_64/cmpxchg.h	134
-rw-r--r--	include/asm-x86_64/system.h	130
3 files changed, 136 insertions, 130 deletions
diff --git a/include/asm-x86_64/atomic.h b/include/asm-x86_64/atomic.h
index 19e0c607b568..f2e64634fa48 100644
--- a/include/asm-x86_64/atomic.h
+++ b/include/asm-x86_64/atomic.h
@@ -2,7 +2,7 @@
 #define __ARCH_X86_64_ATOMIC__
 
 #include <asm/alternative.h>
-#include <asm/system.h>
+#include <asm/cmpxchg.h>
 
 /* atomic_t should be 32 bit signed type */
 
diff --git a/include/asm-x86_64/cmpxchg.h b/include/asm-x86_64/cmpxchg.h
new file mode 100644
index 000000000000..09a6b6b6b74d
--- /dev/null
+++ b/include/asm-x86_64/cmpxchg.h
@@ -0,0 +1,134 @@
+#ifndef __ASM_CMPXCHG_H
+#define __ASM_CMPXCHG_H
+
+#include <asm/alternative.h> /* Provides LOCK_PREFIX */
+
+#define xchg(ptr,v) ((__typeof__(*(ptr)))__xchg((unsigned long)(v),(ptr),sizeof(*(ptr))))
+
+#define __xg(x) ((volatile long *)(x))
+
+static inline void set_64bit(volatile unsigned long *ptr, unsigned long val)
+{
+	*ptr = val;
+}
+
+#define _set_64bit set_64bit
+
+/*
+ * Note: no "lock" prefix even on SMP: xchg always implies lock anyway
+ * Note 2: xchg has side effect, so that attribute volatile is necessary,
+ *	  but generally the primitive is invalid, *ptr is output argument. --ANK
+ */
+static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
+{
+	switch (size) {
+		case 1:
+			__asm__ __volatile__("xchgb %b0,%1"
+				:"=q" (x)
+				:"m" (*__xg(ptr)), "0" (x)
+				:"memory");
+			break;
+		case 2:
+			__asm__ __volatile__("xchgw %w0,%1"
+				:"=r" (x)
+				:"m" (*__xg(ptr)), "0" (x)
+				:"memory");
+			break;
+		case 4:
+			__asm__ __volatile__("xchgl %k0,%1"
+				:"=r" (x)
+				:"m" (*__xg(ptr)), "0" (x)
+				:"memory");
+			break;
+		case 8:
+			__asm__ __volatile__("xchgq %0,%1"
+				:"=r" (x)
+				:"m" (*__xg(ptr)), "0" (x)
+				:"memory");
+			break;
+	}
+	return x;
+}
+
+/*
+ * Atomic compare and exchange.  Compare OLD with MEM, if identical,
+ * store NEW in MEM.  Return the initial value in MEM.  Success is
+ * indicated by comparing RETURN with OLD.
+ */
+
+#define __HAVE_ARCH_CMPXCHG 1
+
+static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
+				      unsigned long new, int size)
+{
+	unsigned long prev;
+	switch (size) {
+	case 1:
+		__asm__ __volatile__(LOCK_PREFIX "cmpxchgb %b1,%2"
+				     : "=a"(prev)
+				     : "q"(new), "m"(*__xg(ptr)), "0"(old)
+				     : "memory");
+		return prev;
+	case 2:
+		__asm__ __volatile__(LOCK_PREFIX "cmpxchgw %w1,%2"
+				     : "=a"(prev)
+				     : "r"(new), "m"(*__xg(ptr)), "0"(old)
+				     : "memory");
+		return prev;
+	case 4:
+		__asm__ __volatile__(LOCK_PREFIX "cmpxchgl %k1,%2"
+				     : "=a"(prev)
+				     : "r"(new), "m"(*__xg(ptr)), "0"(old)
+				     : "memory");
+		return prev;
+	case 8:
+		__asm__ __volatile__(LOCK_PREFIX "cmpxchgq %1,%2"
+				     : "=a"(prev)
+				     : "r"(new), "m"(*__xg(ptr)), "0"(old)
+				     : "memory");
+		return prev;
+	}
+	return old;
+}
+
+static inline unsigned long __cmpxchg_local(volatile void *ptr,
+			unsigned long old, unsigned long new, int size)
+{
+	unsigned long prev;
+	switch (size) {
+	case 1:
+		__asm__ __volatile__("cmpxchgb %b1,%2"
+				     : "=a"(prev)
+				     : "q"(new), "m"(*__xg(ptr)), "0"(old)
+				     : "memory");
+		return prev;
+	case 2:
+		__asm__ __volatile__("cmpxchgw %w1,%2"
+				     : "=a"(prev)
+				     : "r"(new), "m"(*__xg(ptr)), "0"(old)
+				     : "memory");
+		return prev;
+	case 4:
+		__asm__ __volatile__("cmpxchgl %k1,%2"
+				     : "=a"(prev)
+				     : "r"(new), "m"(*__xg(ptr)), "0"(old)
+				     : "memory");
+		return prev;
+	case 8:
+		__asm__ __volatile__("cmpxchgq %1,%2"
+				     : "=a"(prev)
+				     : "r"(new), "m"(*__xg(ptr)), "0"(old)
+				     : "memory");
+		return prev;
+	}
+	return old;
+}
+
+#define cmpxchg(ptr,o,n)\
+	((__typeof__(*(ptr)))__cmpxchg((ptr),(unsigned long)(o),\
+					(unsigned long)(n),sizeof(*(ptr))))
+#define cmpxchg_local(ptr,o,n)\
+	((__typeof__(*(ptr)))__cmpxchg_local((ptr),(unsigned long)(o),\
+					(unsigned long)(n),sizeof(*(ptr))))
+
+#endif
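As a usage note: per the comments in the new header, xchg() needs no explicit
LOCK prefix and cmpxchg() reports success through its return value, so
callers typically wrap it in a retry loop. A minimal sketch against this API
(the helper names here are illustrative, not part of the patch):

	/* Lock-free increment: retry until the compare-and-exchange lands. */
	static inline void lockfree_inc(volatile unsigned long *counter)
	{
		unsigned long old;

		do {
			old = *counter;		/* snapshot the current value */
		} while (cmpxchg(counter, old, old + 1) != old);
	}

	/* Trivial test-and-set lock word: xchg() already implies LOCK. */
	static inline void busy_lock(volatile unsigned long *lock)
	{
		while (xchg(lock, 1UL) != 0UL)
			;			/* spin until we observe 0 */
	}

	static inline void busy_unlock(volatile unsigned long *lock)
	{
		*lock = 0UL;			/* plain store suffices to release on x86 */
	}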
diff --git a/include/asm-x86_64/system.h b/include/asm-x86_64/system.h
index 5316f3cac230..b7b8021e8c43 100644
--- a/include/asm-x86_64/system.h
+++ b/include/asm-x86_64/system.h
@@ -3,7 +3,7 @@
 
 #include <linux/kernel.h>
 #include <asm/segment.h>
-#include <asm/alternative.h>
+#include <asm/cmpxchg.h>
 
 #ifdef __KERNEL__
 
@@ -124,134 +124,6 @@ static inline void sched_cacheflush(void)
 
 #define nop() __asm__ __volatile__ ("nop")
 
-#define xchg(ptr,v) ((__typeof__(*(ptr)))__xchg((unsigned long)(v),(ptr),sizeof(*(ptr))))
-
-#define __xg(x) ((volatile long *)(x))
-
-static inline void set_64bit(volatile unsigned long *ptr, unsigned long val)
-{
-	*ptr = val;
-}
-
-#define _set_64bit set_64bit
-
-/*
- * Note: no "lock" prefix even on SMP: xchg always implies lock anyway
- * Note 2: xchg has side effect, so that attribute volatile is necessary,
- *	  but generally the primitive is invalid, *ptr is output argument. --ANK
- */
-static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
-{
-	switch (size) {
-		case 1:
-			__asm__ __volatile__("xchgb %b0,%1"
-				:"=q" (x)
-				:"m" (*__xg(ptr)), "0" (x)
-				:"memory");
-			break;
-		case 2:
-			__asm__ __volatile__("xchgw %w0,%1"
-				:"=r" (x)
-				:"m" (*__xg(ptr)), "0" (x)
-				:"memory");
-			break;
-		case 4:
-			__asm__ __volatile__("xchgl %k0,%1"
-				:"=r" (x)
-				:"m" (*__xg(ptr)), "0" (x)
-				:"memory");
-			break;
-		case 8:
-			__asm__ __volatile__("xchgq %0,%1"
-				:"=r" (x)
-				:"m" (*__xg(ptr)), "0" (x)
-				:"memory");
-			break;
-	}
-	return x;
-}
-
-/*
- * Atomic compare and exchange.  Compare OLD with MEM, if identical,
- * store NEW in MEM.  Return the initial value in MEM.  Success is
- * indicated by comparing RETURN with OLD.
- */
-
-#define __HAVE_ARCH_CMPXCHG 1
-
-static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
-				      unsigned long new, int size)
-{
-	unsigned long prev;
-	switch (size) {
-	case 1:
-		__asm__ __volatile__(LOCK_PREFIX "cmpxchgb %b1,%2"
-				     : "=a"(prev)
-				     : "q"(new), "m"(*__xg(ptr)), "0"(old)
-				     : "memory");
-		return prev;
-	case 2:
-		__asm__ __volatile__(LOCK_PREFIX "cmpxchgw %w1,%2"
-				     : "=a"(prev)
-				     : "r"(new), "m"(*__xg(ptr)), "0"(old)
-				     : "memory");
-		return prev;
-	case 4:
-		__asm__ __volatile__(LOCK_PREFIX "cmpxchgl %k1,%2"
-				     : "=a"(prev)
-				     : "r"(new), "m"(*__xg(ptr)), "0"(old)
-				     : "memory");
-		return prev;
-	case 8:
-		__asm__ __volatile__(LOCK_PREFIX "cmpxchgq %1,%2"
-				     : "=a"(prev)
-				     : "r"(new), "m"(*__xg(ptr)), "0"(old)
-				     : "memory");
-		return prev;
-	}
-	return old;
-}
-
-static inline unsigned long __cmpxchg_local(volatile void *ptr,
-			unsigned long old, unsigned long new, int size)
-{
-	unsigned long prev;
-	switch (size) {
-	case 1:
-		__asm__ __volatile__("cmpxchgb %b1,%2"
-				     : "=a"(prev)
-				     : "q"(new), "m"(*__xg(ptr)), "0"(old)
-				     : "memory");
-		return prev;
-	case 2:
-		__asm__ __volatile__("cmpxchgw %w1,%2"
-				     : "=a"(prev)
-				     : "r"(new), "m"(*__xg(ptr)), "0"(old)
-				     : "memory");
-		return prev;
-	case 4:
-		__asm__ __volatile__("cmpxchgl %k1,%2"
-				     : "=a"(prev)
-				     : "r"(new), "m"(*__xg(ptr)), "0"(old)
-				     : "memory");
-		return prev;
-	case 8:
-		__asm__ __volatile__("cmpxchgq %1,%2"
-				     : "=a"(prev)
-				     : "r"(new), "m"(*__xg(ptr)), "0"(old)
-				     : "memory");
-		return prev;
-	}
-	return old;
-}
-
-#define cmpxchg(ptr,o,n)\
-	((__typeof__(*(ptr)))__cmpxchg((ptr),(unsigned long)(o),\
-					(unsigned long)(n),sizeof(*(ptr))))
-#define cmpxchg_local(ptr,o,n)\
-	((__typeof__(*(ptr)))__cmpxchg_local((ptr),(unsigned long)(o),\
-					(unsigned long)(n),sizeof(*(ptr))))
-
 #ifdef CONFIG_SMP
 #define smp_mb() mb()
 #define smp_rmb() rmb()